mirror of https://github.com/prometheus/prometheus.git
synced 2025-03-05 20:59:13 -08:00

Merge remote-tracking branch 'upstream/main' into codesome/sync-prom

Signed-off-by: Ganesh Vernekar <ganeshvern@gmail.com>

Commit 41649ceb1b
@@ -1,7 +1,7 @@
 go:
     # Whenever the Go version is updated here,
     # .circle/config.yml should also be updated.
-    version: 1.19
+    version: 1.20
 repository:
     path: github.com/prometheus/prometheus
 build:
@@ -1,6 +1,6 @@
 # Changelog

-## 2.43.0-rc.0 / 2023-03-09
+## 2.43.0 / 2023-03-21

 We are working on some performance improvements in Prometheus, which are only
 built into Prometheus when compiling it using the Go tag `stringlabels`

@@ -8,9 +8,9 @@ built into Prometheus when compiling it using the Go tag `stringlabels`
 structure for labels that uses a single string to hold all the label/values,
 resulting in a smaller heap size and some speedups in most cases. We would like
 to encourage users who are interested in these improvements to help us measure
-the gains on their production architecture. Building Prometheus from source
-with the `stringlabels` Go tag and providing feedback on its effectiveness in
-their specific use cases would be incredibly helpful to us. #10991
+the gains on their production architecture. We are providing release artefacts
+`2.43.0+stringlabels` and Docker images tagged `v2.43.0-stringlabels` with those
+improvements for testing. #10991

 * [FEATURE] Promtool: Add HTTP client configuration to query commands. #11487
 * [FEATURE] Scrape: Add `include_scrape_configs` to include scrape configs from different files. #12019
Makefile (5 lines changed)
@@ -133,3 +133,8 @@ bench_tsdb: $(PROMU)
 	@$(GO) tool pprof --alloc_space -svg $(PROMTOOL) $(TSDB_BENCHMARK_OUTPUT_DIR)/mem.prof > $(TSDB_BENCHMARK_OUTPUT_DIR)/memprof.alloc.svg
 	@$(GO) tool pprof -svg $(PROMTOOL) $(TSDB_BENCHMARK_OUTPUT_DIR)/block.prof > $(TSDB_BENCHMARK_OUTPUT_DIR)/blockprof.svg
 	@$(GO) tool pprof -svg $(PROMTOOL) $(TSDB_BENCHMARK_OUTPUT_DIR)/mutex.prof > $(TSDB_BENCHMARK_OUTPUT_DIR)/mutexprof.svg
+
+.PHONY: cli-documentation
+cli-documentation:
+	$(GO) run ./cmd/prometheus/ --write-documentation > docs/command-line/prometheus.md
+	$(GO) run ./cmd/promtool/ write-documentation > docs/command-line/promtool.md
@@ -91,6 +91,8 @@ BUILD_DOCKER_ARCHS = $(addprefix common-docker-,$(DOCKER_ARCHS))
 PUBLISH_DOCKER_ARCHS = $(addprefix common-docker-publish-,$(DOCKER_ARCHS))
 TAG_DOCKER_ARCHS = $(addprefix common-docker-tag-latest-,$(DOCKER_ARCHS))

+SANITIZED_DOCKER_IMAGE_TAG := $(subst +,-,$(DOCKER_IMAGE_TAG))
+
 ifeq ($(GOHOSTARCH),amd64)
 	ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux freebsd darwin windows))
 		# Only supported on amd64

@@ -207,7 +209,7 @@ common-tarball: promu
 .PHONY: common-docker $(BUILD_DOCKER_ARCHS)
 common-docker: $(BUILD_DOCKER_ARCHS)
 $(BUILD_DOCKER_ARCHS): common-docker-%:
-	docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" \
+	docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" \
 		-f $(DOCKERFILE_PATH) \
 		--build-arg ARCH="$*" \
 		--build-arg OS="linux" \

@@ -216,19 +218,19 @@ $(BUILD_DOCKER_ARCHS): common-docker-%:
 .PHONY: common-docker-publish $(PUBLISH_DOCKER_ARCHS)
 common-docker-publish: $(PUBLISH_DOCKER_ARCHS)
 $(PUBLISH_DOCKER_ARCHS): common-docker-publish-%:
-	docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)"
+	docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)"

 DOCKER_MAJOR_VERSION_TAG = $(firstword $(subst ., ,$(shell cat VERSION)))
 .PHONY: common-docker-tag-latest $(TAG_DOCKER_ARCHS)
 common-docker-tag-latest: $(TAG_DOCKER_ARCHS)
 $(TAG_DOCKER_ARCHS): common-docker-tag-latest-%:
-	docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:latest"
-	docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:v$(DOCKER_MAJOR_VERSION_TAG)"
+	docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:latest"
+	docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:v$(DOCKER_MAJOR_VERSION_TAG)"

 .PHONY: common-docker-manifest
 common-docker-manifest:
-	DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" $(foreach ARCH,$(DOCKER_ARCHS),$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$(ARCH):$(DOCKER_IMAGE_TAG))
-	DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)"
+	DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(SANITIZED_DOCKER_IMAGE_TAG)" $(foreach ARCH,$(DOCKER_ARCHS),$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$(ARCH):$(SANITIZED_DOCKER_IMAGE_TAG))
+	DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(SANITIZED_DOCKER_IMAGE_TAG)"

 .PHONY: promu
 promu: $(PROMU)
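Why the new variable exists: the 2.43.0 release ships build artefacts such as `2.43.0+stringlabels` (see the CHANGELOG above), and Docker image tags may not contain `+`, so the Makefile rewrites it to `-`. A minimal Go sketch of what `$(subst +,-,$(DOCKER_IMAGE_TAG))` does; the function name and example values here are illustrative, not part of the Makefile:

```go
package main

import (
	"fmt"
	"strings"
)

// sanitizeDockerTag mirrors the Makefile's $(subst +,-,$(DOCKER_IMAGE_TAG)):
// Docker tags disallow '+', so build-metadata suffixes like "+stringlabels"
// are rewritten with '-'.
func sanitizeDockerTag(tag string) string {
	return strings.ReplaceAll(tag, "+", "-")
}

func main() {
	fmt.Println(sanitizeDockerTag("v2.43.0+stringlabels")) // v2.43.0-stringlabels
	fmt.Println(sanitizeDockerTag("v2.43.0"))              // unchanged: v2.43.0
}
```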
@@ -48,7 +48,8 @@ Release cadence of first pre-releases being cut is 6 weeks.
 | v2.41 | 2022-12-14 | Julien Pivotto (GitHub: @roidelapluie) |
 | v2.42 | 2023-01-25 | Kemal Akkoyun (GitHub: @kakkoyun) |
 | v2.43 | 2023-03-08 | Julien Pivotto (GitHub: @roidelapluie) |
-| v2.44 | 2023-04-19 | **searching for volunteer** |
+| v2.44 | 2023-04-19 | Bryan Boreham (GitHub: @bboreham) |
+| v2.45 | 2023-05-31 | **searching for volunteer** |

 If you are interested in volunteering please create a pull request against the [prometheus/prometheus](https://github.com/prometheus/prometheus) repository and propose yourself for the release series of your choice.
@@ -70,6 +70,7 @@ import (
 	"github.com/prometheus/prometheus/tracing"
 	"github.com/prometheus/prometheus/tsdb"
 	"github.com/prometheus/prometheus/tsdb/agent"
+	"github.com/prometheus/prometheus/util/documentcli"
 	"github.com/prometheus/prometheus/util/logging"
 	prom_runtime "github.com/prometheus/prometheus/util/runtime"
 	"github.com/prometheus/prometheus/web"

@@ -413,6 +414,15 @@ func main() {

 	promlogflag.AddFlags(a, &cfg.promlogConfig)

+	a.Flag("write-documentation", "Generate command line documentation. Internal use.").Hidden().Action(func(ctx *kingpin.ParseContext) error {
+		if err := documentcli.GenerateMarkdown(a.Model(), os.Stdout); err != nil {
+			os.Exit(1)
+			return err
+		}
+		os.Exit(0)
+		return nil
+	}).Bool()
+
 	_, err := a.Parse(os.Args[1:])
 	if err != nil {
 		fmt.Fprintln(os.Stderr, fmt.Errorf("Error parsing commandline arguments: %w", err))
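For readers unfamiliar with the pattern above: a hidden kingpin flag whose `Action` runs during parsing and exits before normal startup continues. A self-contained sketch, assuming the kingpin v2 API imported as `github.com/alecthomas/kingpin/v2` (the exact import path Prometheus uses differs between versions); the flag name and output below are illustrative:

```go
package main

import (
	"fmt"
	"os"

	"github.com/alecthomas/kingpin/v2"
)

func main() {
	app := kingpin.New("demo", "Demo of a hidden documentation flag.")

	// The Action callback fires while arguments are being parsed, prints
	// the generated output, and exits -- the same shape as the
	// --write-documentation flag added in the hunk above.
	app.Flag("write-documentation", "Generate docs. Internal use.").
		Hidden().
		Action(func(ctx *kingpin.ParseContext) error {
			fmt.Println("# demo\n\ngenerated documentation would go here")
			os.Exit(0)
			return nil
		}).Bool()

	if _, err := app.Parse(os.Args[1:]); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(2)
	}
	fmt.Println("normal startup")
}
```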
@@ -23,6 +23,7 @@ import (
 	"os"
 	"os/exec"
 	"path/filepath"
+	"runtime"
 	"strings"
 	"syscall"
 	"testing"

@@ -483,3 +484,31 @@ func TestModeSpecificFlags(t *testing.T) {
 		})
 	}
 }
+
+func TestDocumentation(t *testing.T) {
+	if runtime.GOOS == "windows" {
+		t.SkipNow()
+	}
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	defer cancel()
+
+	cmd := exec.CommandContext(ctx, promPath, "-test.main", "--write-documentation")
+
+	var stdout bytes.Buffer
+	cmd.Stdout = &stdout
+
+	if err := cmd.Run(); err != nil {
+		if exitError, ok := err.(*exec.ExitError); ok {
+			if exitError.ExitCode() != 0 {
+				fmt.Println("Command failed with non-zero exit code")
+			}
+		}
+	}
+
+	generatedContent := strings.ReplaceAll(stdout.String(), filepath.Base(promPath), strings.TrimSuffix(filepath.Base(promPath), ".test"))
+
+	expectedContent, err := os.ReadFile(filepath.Join("..", "..", "docs", "command-line", "prometheus.md"))
+	require.NoError(t, err)
+
+	require.Equal(t, string(expectedContent), generatedContent, "Generated content does not match documentation. Hint: run `make cli-documentation`.")
+}
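The `-test.main` argument above relies on a dispatch that lives elsewhere in this test file (not part of this hunk): a `TestMain` that spots the sentinel, strips it, and runs the real `main()` instead of the test suite, so the compiled test binary can stand in for the real `prometheus` binary. A sketch of that dispatch, offered as an assumption about the surrounding file rather than a quote from it:

```go
package main

import (
	"os"
	"testing"
)

// TestMain sketch: when invoked as `binary -test.main <args...>`, drop the
// sentinel and run the application's main() instead of the tests.
func TestMain(m *testing.M) {
	for i, arg := range os.Args {
		if arg == "-test.main" {
			os.Args = append(os.Args[:i], os.Args[i+1:]...)
			main()
			return
		}
	}
	os.Exit(m.Run())
}
```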
@@ -59,6 +59,7 @@ import (
 	_ "github.com/prometheus/prometheus/plugins" // Register plugins.
 	"github.com/prometheus/prometheus/promql"
 	"github.com/prometheus/prometheus/scrape"
+	"github.com/prometheus/prometheus/util/documentcli"
 )

 const (

@@ -223,6 +224,8 @@ func main() {

 	featureList := app.Flag("enable-feature", "Comma separated feature names to enable (only PromQL related and no-default-scrape-port). See https://prometheus.io/docs/prometheus/latest/feature_flags/ for the options and more details.").Default("").Strings()

+	documentationCmd := app.Command("write-documentation", "Generate command line documentation. Internal use.").Hidden()
+
 	parsedCmd := kingpin.MustParse(app.Parse(os.Args[1:]))

 	var p printer

@@ -329,6 +332,8 @@ func main() {

 	case importRulesCmd.FullCommand():
 		os.Exit(checkErr(importRules(serverURL, httpRoundTripper, *importRulesStart, *importRulesEnd, *importRulesOutputDir, *importRulesEvalInterval, *maxBlockDuration, *importRulesFiles...)))
+	case documentationCmd.FullCommand():
+		os.Exit(checkErr(documentcli.GenerateMarkdown(app.Model(), os.Stdout)))
 	}
 }
@@ -14,6 +14,8 @@
 package main

 import (
+	"bytes"
+	"context"
 	"errors"
 	"fmt"
 	"net/http"

@@ -21,6 +23,7 @@ import (
 	"net/url"
 	"os"
 	"os/exec"
 	"path/filepath"
+	"runtime"
 	"strings"
 	"syscall"

@@ -433,3 +436,31 @@ func TestExitCodes(t *testing.T) {
 		})
 	}
 }
+
+func TestDocumentation(t *testing.T) {
+	if runtime.GOOS == "windows" {
+		t.SkipNow()
+	}
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	defer cancel()
+
+	cmd := exec.CommandContext(ctx, promtoolPath, "-test.main", "write-documentation")
+
+	var stdout bytes.Buffer
+	cmd.Stdout = &stdout
+
+	if err := cmd.Run(); err != nil {
+		if exitError, ok := err.(*exec.ExitError); ok {
+			if exitError.ExitCode() != 0 {
+				fmt.Println("Command failed with non-zero exit code")
+			}
+		}
+	}
+
+	generatedContent := strings.ReplaceAll(stdout.String(), filepath.Base(promtoolPath), strings.TrimSuffix(filepath.Base(promtoolPath), ".test"))
+
+	expectedContent, err := os.ReadFile(filepath.Join("..", "..", "docs", "command-line", "promtool.md"))
+	require.NoError(t, err)
+
+	require.Equal(t, string(expectedContent), generatedContent, "Generated content does not match documentation. Hint: run `make cli-documentation`.")
+}
docs/command-line/index.md (new file, 4 lines)

---
title: Command Line
sort_rank: 9
---
docs/command-line/prometheus.md (new file, 59 lines)

---
title: prometheus
---

# prometheus

The Prometheus monitoring server

## Flags

| Flag | Description | Default |
| --- | --- | --- |
| <code class="text-nowrap">-h</code>, <code class="text-nowrap">--help</code> | Show context-sensitive help (also try --help-long and --help-man). | |
| <code class="text-nowrap">--version</code> | Show application version. | |
| <code class="text-nowrap">--config.file</code> | Prometheus configuration file path. | `prometheus.yml` |
| <code class="text-nowrap">--web.listen-address</code> | Address to listen on for UI, API, and telemetry. | `0.0.0.0:9090` |
| <code class="text-nowrap">--web.config.file</code> | [EXPERIMENTAL] Path to configuration file that can enable TLS or authentication. | |
| <code class="text-nowrap">--web.read-timeout</code> | Maximum duration before timing out read of the request, and closing idle connections. | `5m` |
| <code class="text-nowrap">--web.max-connections</code> | Maximum number of simultaneous connections. | `512` |
| <code class="text-nowrap">--web.external-url</code> | The URL under which Prometheus is externally reachable (for example, if Prometheus is served via a reverse proxy). Used for generating relative and absolute links back to Prometheus itself. If the URL has a path portion, it will be used to prefix all HTTP endpoints served by Prometheus. If omitted, relevant URL components will be derived automatically. | |
| <code class="text-nowrap">--web.route-prefix</code> | Prefix for the internal routes of web endpoints. Defaults to path of --web.external-url. | |
| <code class="text-nowrap">--web.user-assets</code> | Path to static asset directory, available at /user. | |
| <code class="text-nowrap">--web.enable-lifecycle</code> | Enable shutdown and reload via HTTP request. | `false` |
| <code class="text-nowrap">--web.enable-admin-api</code> | Enable API endpoints for admin control actions. | `false` |
| <code class="text-nowrap">--web.enable-remote-write-receiver</code> | Enable API endpoint accepting remote write requests. | `false` |
| <code class="text-nowrap">--web.console.templates</code> | Path to the console template directory, available at /consoles. | `consoles` |
| <code class="text-nowrap">--web.console.libraries</code> | Path to the console library directory. | `console_libraries` |
| <code class="text-nowrap">--web.page-title</code> | Document title of Prometheus instance. | `Prometheus Time Series Collection and Processing Server` |
| <code class="text-nowrap">--web.cors.origin</code> | Regex for CORS origin. It is fully anchored. Example: 'https?://(domain1|domain2)\.com' | `.*` |
| <code class="text-nowrap">--storage.tsdb.path</code> | Base path for metrics storage. Use with server mode only. | `data/` |
| <code class="text-nowrap">--storage.tsdb.retention</code> | [DEPRECATED] How long to retain samples in storage. This flag has been deprecated, use "storage.tsdb.retention.time" instead. Use with server mode only. | |
| <code class="text-nowrap">--storage.tsdb.retention.time</code> | How long to retain samples in storage. When this flag is set it overrides "storage.tsdb.retention". If neither this flag nor "storage.tsdb.retention" nor "storage.tsdb.retention.size" is set, the retention time defaults to 15d. Units Supported: y, w, d, h, m, s, ms. Use with server mode only. | |
| <code class="text-nowrap">--storage.tsdb.retention.size</code> | Maximum number of bytes that can be stored for blocks. A unit is required, supported units: B, KB, MB, GB, TB, PB, EB. Ex: "512MB". Based on powers-of-2, so 1KB is 1024B. Use with server mode only. | |
| <code class="text-nowrap">--storage.tsdb.no-lockfile</code> | Do not create lockfile in data directory. Use with server mode only. | `false` |
| <code class="text-nowrap">--storage.tsdb.head-chunks-write-queue-size</code> | Size of the queue through which head chunks are written to the disk to be m-mapped, 0 disables the queue completely. Experimental. Use with server mode only. | `0` |
| <code class="text-nowrap">--storage.agent.path</code> | Base path for metrics storage. Use with agent mode only. | `data-agent/` |
| <code class="text-nowrap">--storage.agent.wal-compression</code> | Compress the agent WAL. Use with agent mode only. | `true` |
| <code class="text-nowrap">--storage.agent.retention.min-time</code> | Minimum age samples may be before being considered for deletion when the WAL is truncated Use with agent mode only. | |
| <code class="text-nowrap">--storage.agent.retention.max-time</code> | Maximum age samples may be before being forcibly deleted when the WAL is truncated Use with agent mode only. | |
| <code class="text-nowrap">--storage.agent.no-lockfile</code> | Do not create lockfile in data directory. Use with agent mode only. | `false` |
| <code class="text-nowrap">--storage.remote.flush-deadline</code> | How long to wait flushing sample on shutdown or config reload. | `1m` |
| <code class="text-nowrap">--storage.remote.read-sample-limit</code> | Maximum overall number of samples to return via the remote read interface, in a single query. 0 means no limit. This limit is ignored for streamed response types. Use with server mode only. | `5e7` |
| <code class="text-nowrap">--storage.remote.read-concurrent-limit</code> | Maximum number of concurrent remote read calls. 0 means no limit. Use with server mode only. | `10` |
| <code class="text-nowrap">--storage.remote.read-max-bytes-in-frame</code> | Maximum number of bytes in a single frame for streaming remote read response types before marshalling. Note that client might have limit on frame size as well. 1MB as recommended by protobuf by default. Use with server mode only. | `1048576` |
| <code class="text-nowrap">--rules.alert.for-outage-tolerance</code> | Max time to tolerate prometheus outage for restoring "for" state of alert. Use with server mode only. | `1h` |
| <code class="text-nowrap">--rules.alert.for-grace-period</code> | Minimum duration between alert and restored "for" state. This is maintained only for alerts with configured "for" time greater than grace period. Use with server mode only. | `10m` |
| <code class="text-nowrap">--rules.alert.resend-delay</code> | Minimum amount of time to wait before resending an alert to Alertmanager. Use with server mode only. | `1m` |
| <code class="text-nowrap">--alertmanager.notification-queue-capacity</code> | The capacity of the queue for pending Alertmanager notifications. Use with server mode only. | `10000` |
| <code class="text-nowrap">--query.lookback-delta</code> | The maximum lookback duration for retrieving metrics during expression evaluations and federation. Use with server mode only. | `5m` |
| <code class="text-nowrap">--query.timeout</code> | Maximum time a query may take before being aborted. Use with server mode only. | `2m` |
| <code class="text-nowrap">--query.max-concurrency</code> | Maximum number of queries executed concurrently. Use with server mode only. | `20` |
| <code class="text-nowrap">--query.max-samples</code> | Maximum number of samples a single query can load into memory. Note that queries will fail if they try to load more samples than this into memory, so this also limits the number of samples a query can return. Use with server mode only. | `50000000` |
| <code class="text-nowrap">--enable-feature</code> | Comma separated feature names to enable. Valid options: agent, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-at-modifier, promql-negative-offset, promql-per-step-stats, remote-write-receiver (DEPRECATED), extra-scrape-metrics, new-service-discovery-manager, auto-gomaxprocs, no-default-scrape-port, native-histograms. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details. | |
| <code class="text-nowrap">--log.level</code> | Only log messages with the given severity or above. One of: [debug, info, warn, error] | `info` |
| <code class="text-nowrap">--log.format</code> | Output format of log messages. One of: [logfmt, json] | `logfmt` |
docs/command-line/promtool.md (new file, 536 lines)

---
title: promtool
---

# promtool

Tooling for the Prometheus monitoring system.

## Flags

| Flag | Description |
| --- | --- |
| <code class="text-nowrap">-h</code>, <code class="text-nowrap">--help</code> | Show context-sensitive help (also try --help-long and --help-man). |
| <code class="text-nowrap">--version</code> | Show application version. |
| <code class="text-nowrap">--enable-feature</code> | Comma separated feature names to enable (only PromQL related and no-default-scrape-port). See https://prometheus.io/docs/prometheus/latest/feature_flags/ for the options and more details. |

## Commands

| Command | Description |
| --- | --- |
| help | Show help. |
| check | Check the resources for validity. |
| query | Run query against a Prometheus server. |
| debug | Fetch debug information. |
| test | Unit testing. |
| tsdb | Run tsdb commands. |

### `promtool help`

Show help.

#### Arguments

| Argument | Description |
| --- | --- |
| command | Show help on command. |

### `promtool check`

Check the resources for validity.

#### Flags

| Flag | Description |
| --- | --- |
| <code class="text-nowrap">--extended</code> | Print extended information related to the cardinality of the metrics. |

##### `promtool check service-discovery`

Perform service discovery for the given job name and report the results, including relabeling.

###### Flags

| Flag | Description | Default |
| --- | --- | --- |
| <code class="text-nowrap">--timeout</code> | The time to wait for discovery results. | `30s` |

###### Arguments

| Argument | Description | Required |
| --- | --- | --- |
| config-file | The prometheus config file. | Yes |
| job | The job to run service discovery for. | Yes |

##### `promtool check config`

Check if the config files are valid or not.

###### Flags

| Flag | Description | Default |
| --- | --- | --- |
| <code class="text-nowrap">--syntax-only</code> | Only check the config file syntax, ignoring file and content validation referenced in the config | |
| <code class="text-nowrap">--lint</code> | Linting checks to apply to the rules specified in the config. Available options are: all, duplicate-rules, none. Use --lint=none to disable linting | `duplicate-rules` |
| <code class="text-nowrap">--lint-fatal</code> | Make lint errors exit with exit code 3. | `false` |
| <code class="text-nowrap">--agent</code> | Check config file for Prometheus in Agent mode. | |

###### Arguments

| Argument | Description | Required |
| --- | --- | --- |
| config-files | The config files to check. | Yes |

##### `promtool check web-config`

Check if the web config files are valid or not.

###### Arguments

| Argument | Description | Required |
| --- | --- | --- |
| web-config-files | The config files to check. | Yes |

##### `promtool check rules`

Check if the rule files are valid or not.

###### Flags

| Flag | Description | Default |
| --- | --- | --- |
| <code class="text-nowrap">--lint</code> | Linting checks to apply. Available options are: all, duplicate-rules, none. Use --lint=none to disable linting | `duplicate-rules` |
| <code class="text-nowrap">--lint-fatal</code> | Make lint errors exit with exit code 3. | `false` |

###### Arguments

| Argument | Description | Required |
| --- | --- | --- |
| rule-files | The rule files to check. | Yes |

##### `promtool check metrics`

Pass Prometheus metrics over stdin to lint them for consistency and correctness.

examples:

$ cat metrics.prom | promtool check metrics

$ curl -s http://localhost:9090/metrics | promtool check metrics

### `promtool query`

Run query against a Prometheus server.

#### Flags

| Flag | Description | Default |
| --- | --- | --- |
| <code class="text-nowrap">-o</code>, <code class="text-nowrap">--format</code> | Output format of the query. | `promql` |
| <code class="text-nowrap">--http.config.file</code> | HTTP client configuration file for promtool to connect to Prometheus. | |

##### `promtool query instant`

Run instant query.

###### Flags

| Flag | Description |
| --- | --- |
| <code class="text-nowrap">--time</code> | Query evaluation time (RFC3339 or Unix timestamp). |

###### Arguments

| Argument | Description | Required |
| --- | --- | --- |
| server | Prometheus server to query. | Yes |
| expr | PromQL query expression. | Yes |

##### `promtool query range`

Run range query.

###### Flags

| Flag | Description |
| --- | --- |
| <code class="text-nowrap">--header</code> | Extra headers to send to server. |
| <code class="text-nowrap">--start</code> | Query range start time (RFC3339 or Unix timestamp). |
| <code class="text-nowrap">--end</code> | Query range end time (RFC3339 or Unix timestamp). |
| <code class="text-nowrap">--step</code> | Query step size (duration). |

###### Arguments

| Argument | Description | Required |
| --- | --- | --- |
| server | Prometheus server to query. | Yes |
| expr | PromQL query expression. | Yes |

##### `promtool query series`

Run series query.

###### Flags

| Flag | Description |
| --- | --- |
| <code class="text-nowrap">--match</code> | Series selector. Can be specified multiple times. |
| <code class="text-nowrap">--start</code> | Start time (RFC3339 or Unix timestamp). |
| <code class="text-nowrap">--end</code> | End time (RFC3339 or Unix timestamp). |

###### Arguments

| Argument | Description | Required |
| --- | --- | --- |
| server | Prometheus server to query. | Yes |

##### `promtool query labels`

Run labels query.

###### Flags

| Flag | Description |
| --- | --- |
| <code class="text-nowrap">--start</code> | Start time (RFC3339 or Unix timestamp). |
| <code class="text-nowrap">--end</code> | End time (RFC3339 or Unix timestamp). |
| <code class="text-nowrap">--match</code> | Series selector. Can be specified multiple times. |

###### Arguments

| Argument | Description | Required |
| --- | --- | --- |
| server | Prometheus server to query. | Yes |
| name | Label name to provide label values for. | Yes |

### `promtool debug`

Fetch debug information.

##### `promtool debug pprof`

Fetch profiling debug information.

###### Arguments

| Argument | Description | Required |
| --- | --- | --- |
| server | Prometheus server to get pprof files from. | Yes |

##### `promtool debug metrics`

Fetch metrics debug information.

###### Arguments

| Argument | Description | Required |
| --- | --- | --- |
| server | Prometheus server to get metrics from. | Yes |

##### `promtool debug all`

Fetch all debug information.

###### Arguments

| Argument | Description | Required |
| --- | --- | --- |
| server | Prometheus server to get all debug information from. | Yes |

### `promtool test`

Unit testing.

##### `promtool test rules`

Unit tests for rules.

###### Arguments

| Argument | Description | Required |
| --- | --- | --- |
| test-rule-file | The unit test file. | Yes |

### `promtool tsdb`

Run tsdb commands.

##### `promtool tsdb bench`

Run benchmarks.

##### `promtool tsdb bench write`

Run a write performance benchmark.

###### Flags

| Flag | Description | Default |
| --- | --- | --- |
| <code class="text-nowrap">--out</code> | Set the output path. | `benchout` |
| <code class="text-nowrap">--metrics</code> | Number of metrics to read. | `10000` |
| <code class="text-nowrap">--scrapes</code> | Number of scrapes to simulate. | `3000` |

###### Arguments

| Argument | Description | Default |
| --- | --- | --- |
| file | Input file with samples data, default is (../../tsdb/testdata/20kseries.json). | `../../tsdb/testdata/20kseries.json` |

##### `promtool tsdb analyze`

Analyze churn, label pair cardinality and compaction efficiency.

###### Flags

| Flag | Description | Default |
| --- | --- | --- |
| <code class="text-nowrap">--limit</code> | How many items to show in each list. | `20` |
| <code class="text-nowrap">--extended</code> | Run extended analysis. | |

###### Arguments

| Argument | Description | Default |
| --- | --- | --- |
| db path | Database path (default is data/). | `data/` |
| block id | Block to analyze (default is the last block). | |

##### `promtool tsdb list`

List tsdb blocks.

###### Flags

| Flag | Description |
| --- | --- |
| <code class="text-nowrap">-r</code>, <code class="text-nowrap">--human-readable</code> | Print human readable values. |

###### Arguments

| Argument | Description | Default |
| --- | --- | --- |
| db path | Database path (default is data/). | `data/` |

##### `promtool tsdb dump`

Dump samples from a TSDB.

###### Flags

| Flag | Description | Default |
| --- | --- | --- |
| <code class="text-nowrap">--min-time</code> | Minimum timestamp to dump. | `-9223372036854775808` |
| <code class="text-nowrap">--max-time</code> | Maximum timestamp to dump. | `9223372036854775807` |
| <code class="text-nowrap">--match</code> | Series selector. | `{__name__=~'(?s:.*)'}` |

###### Arguments

| Argument | Description | Default |
| --- | --- | --- |
| db path | Database path (default is data/). | `data/` |

##### `promtool tsdb create-blocks-from`

[Experimental] Import samples from input and produce TSDB blocks. Please refer to the storage docs for more details.

###### Flags

| Flag | Description |
| --- | --- |
| <code class="text-nowrap">-r</code>, <code class="text-nowrap">--human-readable</code> | Print human readable values. |
| <code class="text-nowrap">-q</code>, <code class="text-nowrap">--quiet</code> | Do not print created blocks. |

##### `promtool tsdb create-blocks-from openmetrics`

Import samples from OpenMetrics input and produce TSDB blocks. Please refer to the storage docs for more details.

###### Arguments

| Argument | Description | Default | Required |
| --- | --- | --- | --- |
| input file | OpenMetrics file to read samples from. | | Yes |
| output directory | Output directory for generated blocks. | `data/` | |

##### `promtool tsdb create-blocks-from rules`

Create blocks of data for new recording rules.

###### Flags

| Flag | Description | Default |
| --- | --- | --- |
| <code class="text-nowrap">--http.config.file</code> | HTTP client configuration file for promtool to connect to Prometheus. | |
| <code class="text-nowrap">--url</code> | The URL for the Prometheus API with the data where the rule will be backfilled from. | `http://localhost:9090` |
| <code class="text-nowrap">--start</code> | The time to start backfilling the new rule from. Must be a RFC3339 formatted date or Unix timestamp. Required. | |
| <code class="text-nowrap">--end</code> | If an end time is provided, all recording rules in the rule files provided will be backfilled to the end time. Default will backfill up to 3 hours ago. Must be a RFC3339 formatted date or Unix timestamp. | |
| <code class="text-nowrap">--output-dir</code> | Output directory for generated blocks. | `data/` |
| <code class="text-nowrap">--eval-interval</code> | How frequently to evaluate rules when backfilling if a value is not set in the recording rule files. | `60s` |

###### Arguments

| Argument | Description | Required |
| --- | --- | --- |
| rule-files | A list of one or more files containing recording rules to be backfilled. All recording rules listed in the files will be backfilled. Alerting rules are not evaluated. | Yes |
@@ -205,7 +205,7 @@ oauth2:
 [ follow_redirects: <boolean> | default = true ]

 # Whether to enable HTTP2.
-[ enable_http2: <bool> | default: true ]
+[ enable_http2: <boolean> | default: true ]

 # Configures the scrape request's TLS settings.
 tls_config:

@@ -218,7 +218,7 @@ tls_config:
 # contain port numbers.
 [ no_proxy: <string> ]
 # Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy)
-[ proxy_from_environment: <bool> | default: false ]
+[ proxy_from_environment: <boolean> | default: false ]
 # Specifies headers to send to proxies during CONNECT requests.
 [ proxy_connect_header:
   [ <string>: [<secret>, ...] ] ]

@@ -447,7 +447,7 @@ tls_config:
 # contain port numbers.
 [ no_proxy: <string> ]
 # Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy)
-[ proxy_from_environment: <bool> | default: false ]
+[ proxy_from_environment: <boolean> | default: false ]
 # Specifies headers to send to proxies during CONNECT requests.
 [ proxy_connect_header:
   [ <string>: [<secret>, ...] ] ]

@@ -535,7 +535,7 @@ oauth2:
 # contain port numbers.
 [ no_proxy: <string> ]
 # Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy)
-[ proxy_from_environment: <bool> | default: false ]
+[ proxy_from_environment: <boolean> | default: false ]
 # Specifies headers to send to proxies during CONNECT requests.
 [ proxy_connect_header:
   [ <string>: [<secret>, ...] ] ]

@@ -544,7 +544,7 @@ oauth2:
 [ follow_redirects: <boolean> | default = true ]

 # Whether to enable HTTP2.
-[ enable_http2: <bool> | default: true ]
+[ enable_http2: <boolean> | default: true ]

 # TLS configuration.
 tls_config:

@@ -646,7 +646,7 @@ oauth2:
 # contain port numbers.
 [ no_proxy: <string> ]
 # Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy)
-[ proxy_from_environment: <bool> | default: false ]
+[ proxy_from_environment: <boolean> | default: false ]
 # Specifies headers to send to proxies during CONNECT requests.
 [ proxy_connect_header:
   [ <string>: [<secret>, ...] ] ]

@@ -655,7 +655,7 @@ oauth2:
 [ follow_redirects: <boolean> | default = true ]

 # Whether to enable HTTP2.
-[ enable_http2: <bool> | default: true ]
+[ enable_http2: <boolean> | default: true ]

 # TLS configuration.
 tls_config:

@@ -733,7 +733,7 @@ oauth2:
 # contain port numbers.
 [ no_proxy: <string> ]
 # Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy)
-[ proxy_from_environment: <bool> | default: false ]
+[ proxy_from_environment: <boolean> | default: false ]
 # Specifies headers to send to proxies during CONNECT requests.
 [ proxy_connect_header:
   [ <string>: [<secret>, ...] ] ]

@@ -742,7 +742,7 @@ oauth2:
 [ follow_redirects: <boolean> | default = true ]

 # Whether to enable HTTP2.
-[ enable_http2: <bool> | default: true ]
+[ enable_http2: <boolean> | default: true ]

 # TLS configuration.
 tls_config:

@@ -791,7 +791,7 @@ host: <string>
 # contain port numbers.
 [ no_proxy: <string> ]
 # Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy)
-[ proxy_from_environment: <bool> | default: false ]
+[ proxy_from_environment: <boolean> | default: false ]
 # Specifies headers to send to proxies during CONNECT requests.
 [ proxy_connect_header:
   [ <string>: [<secret>, ...] ] ]

@@ -849,7 +849,7 @@ oauth2:
 [ follow_redirects: <boolean> | default = true ]

 # Whether to enable HTTP2.
-[ enable_http2: <bool> | default: true ]
+[ enable_http2: <boolean> | default: true ]

 ```

@@ -966,7 +966,7 @@ host: <string>
 # contain port numbers.
 [ no_proxy: <string> ]
 # Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy)
-[ proxy_from_environment: <bool> | default: false ]
+[ proxy_from_environment: <boolean> | default: false ]
 # Specifies headers to send to proxies during CONNECT requests.
 [ proxy_connect_header:
   [ <string>: [<secret>, ...] ] ]

@@ -1026,7 +1026,7 @@ oauth2:
 [ follow_redirects: <boolean> | default = true ]

 # Whether to enable HTTP2.
-[ enable_http2: <bool> | default: true ]
+[ enable_http2: <boolean> | default: true ]

 ```

@@ -1173,7 +1173,7 @@ oauth2:
 # contain port numbers.
 [ no_proxy: <string> ]
 # Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy)
-[ proxy_from_environment: <bool> | default: false ]
+[ proxy_from_environment: <boolean> | default: false ]
 # Specifies headers to send to proxies during CONNECT requests.
 [ proxy_connect_header:
   [ <string>: [<secret>, ...] ] ]

@@ -1182,7 +1182,7 @@ oauth2:
 [ follow_redirects: <boolean> | default = true ]

 # Whether to enable HTTP2.
-[ enable_http2: <bool> | default: true ]
+[ enable_http2: <boolean> | default: true ]

 # TLS configuration.
 tls_config:

@@ -1448,7 +1448,7 @@ oauth2:
 # contain port numbers.
 [ no_proxy: <string> ]
 # Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy)
-[ proxy_from_environment: <bool> | default: false ]
+[ proxy_from_environment: <boolean> | default: false ]
 # Specifies headers to send to proxies during CONNECT requests.
 [ proxy_connect_header:
   [ <string>: [<secret>, ...] ] ]

@@ -1457,7 +1457,7 @@ oauth2:
 [ follow_redirects: <boolean> | default = true ]

 # Whether to enable HTTP2.
-[ enable_http2: <bool> | default: true ]
+[ enable_http2: <boolean> | default: true ]
 ```

 See [this example Prometheus configuration file](/documentation/examples/prometheus-puppetdb.yml)

@@ -1665,7 +1665,7 @@ oauth2:
 # contain port numbers.
 [ no_proxy: <string> ]
 # Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy)
-[ proxy_from_environment: <bool> | default: false ]
+[ proxy_from_environment: <boolean> | default: false ]
 # Specifies headers to send to proxies during CONNECT requests.
 [ proxy_connect_header:
   [ <string>: [<secret>, ...] ] ]

@@ -1674,7 +1674,7 @@ oauth2:
 [ follow_redirects: <boolean> | default = true ]

 # Whether to enable HTTP2.
-[ enable_http2: <bool> | default: true ]
+[ enable_http2: <boolean> | default: true ]

 # TLS configuration.
 tls_config:

@@ -1759,7 +1759,7 @@ oauth2:
 # contain port numbers.
 [ no_proxy: <string> ]
 # Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy)
-[ proxy_from_environment: <bool> | default: false ]
+[ proxy_from_environment: <boolean> | default: false ]
 # Specifies headers to send to proxies during CONNECT requests.
 [ proxy_connect_header:
   [ <string>: [<secret>, ...] ] ]

@@ -1768,7 +1768,7 @@ oauth2:
 [ follow_redirects: <boolean> | default = true ]

 # Whether to enable HTTP2.
-[ enable_http2: <bool> | default: true ]
+[ enable_http2: <boolean> | default: true ]

 # TLS configuration.
 tls_config:

@@ -1842,7 +1842,7 @@ oauth2:
 # contain port numbers.
 [ no_proxy: <string> ]
 # Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy)
-[ proxy_from_environment: <bool> | default: false ]
+[ proxy_from_environment: <boolean> | default: false ]
 # Specifies headers to send to proxies during CONNECT requests.
 [ proxy_connect_header:
   [ <string>: [<secret>, ...] ] ]

@@ -1851,7 +1851,7 @@ oauth2:
 [ follow_redirects: <boolean> | default = true ]

 # Whether to enable HTTP2.
-[ enable_http2: <bool> | default: true ]
+[ enable_http2: <boolean> | default: true ]

 # TLS configuration.
 tls_config:

@@ -2067,7 +2067,7 @@ oauth2:
 # contain port numbers.
 [ no_proxy: <string> ]
 # Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy)
-[ proxy_from_environment: <bool> | default: false ]
+[ proxy_from_environment: <boolean> | default: false ]
 # Specifies headers to send to proxies during CONNECT requests.
 [ proxy_connect_header:
   [ <string>: [<secret>, ...] ] ]

@@ -2076,7 +2076,7 @@ oauth2:
 [ follow_redirects: <boolean> | default = true ]

 # Whether to enable HTTP2.
-[ enable_http2: <bool> | default: true ]
+[ enable_http2: <boolean> | default: true ]

 # TLS configuration.
 tls_config:

@@ -2153,7 +2153,7 @@ server: <string>
 # contain port numbers.
 [ no_proxy: <string> ]
 # Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy)
-[ proxy_from_environment: <bool> | default: false ]
+[ proxy_from_environment: <boolean> | default: false ]
 # Specifies headers to send to proxies during CONNECT requests.
 [ proxy_connect_header:
   [ <string>: [<secret>, ...] ] ]

@@ -2193,7 +2193,7 @@ oauth2:
 [ follow_redirects: <boolean> | default = true ]

 # Whether to enable HTTP2.
-[ enable_http2: <bool> | default: true ]
+[ enable_http2: <boolean> | default: true ]
 ```

 The [relabeling phase](#relabel_config) is the preferred and more powerful way

@@ -2280,7 +2280,7 @@ oauth2:
 # contain port numbers.
 [ no_proxy: <string> ]
 # Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy)
-[ proxy_from_environment: <bool> | default: false ]
+[ proxy_from_environment: <boolean> | default: false ]
 # Specifies headers to send to proxies during CONNECT requests.
 [ proxy_connect_header:
   [ <string>: [<secret>, ...] ] ]

@@ -2289,7 +2289,7 @@ oauth2:
 [ follow_redirects: <boolean> | default = true ]

 # Whether to enable HTTP2.
-[ enable_http2: <bool> | default: true ]
+[ enable_http2: <boolean> | default: true ]

 # TLS configuration.
 tls_config:

@@ -2361,7 +2361,7 @@ oauth2:
 # contain port numbers.
 [ no_proxy: <string> ]
 # Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy)
-[ proxy_from_environment: <bool> | default: false ]
+[ proxy_from_environment: <boolean> | default: false ]
 # Specifies headers to send to proxies during CONNECT requests.
 [ proxy_connect_header:
   [ <string>: [<secret>, ...] ] ]

@@ -2370,7 +2370,7 @@ oauth2:
 [ follow_redirects: <boolean> | default = true ]

 # Whether to enable HTTP2.
-[ enable_http2: <bool> | default: true ]
+[ enable_http2: <boolean> | default: true ]

 # TLS configuration.
 tls_config:

@@ -2456,7 +2456,7 @@ oauth2:
 [ follow_redirects: <boolean> | default = true ]

 # Whether to enable HTTP2.
-[ enable_http2: <bool> | default: true ]
+[ enable_http2: <boolean> | default: true ]

 # TLS configuration for connecting to marathon servers
 tls_config:

@@ -2469,7 +2469,7 @@ tls_config:
 # contain port numbers.
 [ no_proxy: <string> ]
 # Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy)
-[ proxy_from_environment: <bool> | default: false ]
+[ proxy_from_environment: <boolean> | default: false ]
 # Specifies headers to send to proxies during CONNECT requests.
 [ proxy_connect_header:
   [ <string>: [<secret>, ...] ] ]

@@ -2567,7 +2567,7 @@ oauth2:
 # contain port numbers.
 [ no_proxy: <string> ]
 # Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy)
-[ proxy_from_environment: <bool> | default: false ]
+[ proxy_from_environment: <boolean> | default: false ]
 # Specifies headers to send to proxies during CONNECT requests.
 [ proxy_connect_header:
   [ <string>: [<secret>, ...] ] ]

@@ -2576,7 +2576,7 @@ oauth2:
 [ follow_redirects: <boolean> | default = true ]

 # Whether to enable HTTP2.
-[ enable_http2: <bool> | default: true ]
+[ enable_http2: <boolean> | default: true ]

 # TLS configuration.
 tls_config:

@@ -2753,7 +2753,7 @@ tls_config:
 # contain port numbers.
 [ no_proxy: <string> ]
 # Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy)
-[ proxy_from_environment: <bool> | default: false ]
+[ proxy_from_environment: <boolean> | default: false ]
 # Specifies headers to send to proxies during CONNECT requests.
 [ proxy_connect_header:
   [ <string>: [<secret>, ...] ] ]

@@ -2762,7 +2762,7 @@ tls_config:
 [ follow_redirects: <boolean> | default = true ]

 # Whether to enable HTTP2.
-[ enable_http2: <bool> | default: true ]
+[ enable_http2: <boolean> | default: true ]

 # Refresh interval to re-read the app instance list.
 [ refresh_interval: <duration> | default = 30s ]

@@ -2869,7 +2869,7 @@ tags_filter:
 [ follow_redirects: <boolean> | default = true ]

 # Whether to enable HTTP2.
-[ enable_http2: <bool> | default: true ]
+[ enable_http2: <boolean> | default: true ]

 # Optional proxy URL.
 [ proxy_url: <string> ]

@@ -2878,7 +2878,7 @@ tags_filter:
 # contain port numbers.
 [ no_proxy: <string> ]
 # Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy)
-[ proxy_from_environment: <bool> | default: false ]
+[ proxy_from_environment: <boolean> | default: false ]
 # Specifies headers to send to proxies during CONNECT requests.
 [ proxy_connect_header:
   [ <string>: [<secret>, ...] ] ]

@@ -2954,7 +2954,7 @@ oauth2:
 # contain port numbers.
 [ no_proxy: <string> ]
 # Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy)
-[ proxy_from_environment: <bool> | default: false ]
+[ proxy_from_environment: <boolean> | default: false ]
 # Specifies headers to send to proxies during CONNECT requests.
 [ proxy_connect_header:
   [ <string>: [<secret>, ...] ] ]

@@ -2963,7 +2963,7 @@ oauth2:
 [ follow_redirects: <boolean> | default = true ]

 # Whether to enable HTTP2.
-[ enable_http2: <bool> | default: true ]
+[ enable_http2: <boolean> | default: true ]

 # TLS configuration.
 tls_config:

@@ -3036,7 +3036,7 @@ oauth2:
 # contain port numbers.
 [ no_proxy: <string> ]
 # Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy)
-[ proxy_from_environment: <bool> | default: false ]
+[ proxy_from_environment: <boolean> | default: false ]
 # Specifies headers to send to proxies during CONNECT requests.
 [ proxy_connect_header:
   [ <string>: [<secret>, ...] ] ]

@@ -3045,7 +3045,7 @@ oauth2:
 [ follow_redirects: <boolean> | default = true ]

 # Whether to enable HTTP2.
-[ enable_http2: <bool> | default: true ]
+[ enable_http2: <boolean> | default: true ]

 # TLS configuration.
 tls_config:

@@ -3238,7 +3238,7 @@ tls_config:
 # contain port numbers.
 [ no_proxy: <string> ]
 # Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy)
-[ proxy_from_environment: <bool> | default: false ]
+[ proxy_from_environment: <boolean> | default: false ]
 # Specifies headers to send to proxies during CONNECT requests.
 [ proxy_connect_header:
   [ <string>: [<secret>, ...] ] ]

@@ -3247,7 +3247,7 @@ tls_config:
 [ follow_redirects: <boolean> | default = true ]

 # Whether to enable HTTP2.
-[ enable_http2: <bool> | default: true ]
+[ enable_http2: <boolean> | default: true ]

 # List of Azure service discovery configurations.
 azure_sd_configs:

@@ -3456,7 +3456,7 @@ tls_config:
 # contain port numbers.
 [ no_proxy: <string> ]
 # Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy)
-[ proxy_from_environment: <bool> | default: false ]
+[ proxy_from_environment: <boolean> | default: false ]
 # Specifies headers to send to proxies during CONNECT requests.
 [ proxy_connect_header:
   [ <string>: [<secret>, ...] ] ]

@@ -3465,7 +3465,7 @@ tls_config:
 [ follow_redirects: <boolean> | default = true ]

 # Whether to enable HTTP2.
-[ enable_http2: <bool> | default: true ]
+[ enable_http2: <boolean> | default: true ]

 # Configures the queue used to write to remote storage.
 queue_config:

@@ -3569,7 +3569,7 @@ tls_config:
 # contain port numbers.
 [ no_proxy: <string> ]
 # Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy)
-[ proxy_from_environment: <bool> | default: false ]
+[ proxy_from_environment: <boolean> | default: false ]
 # Specifies headers to send to proxies during CONNECT requests.
 [ proxy_connect_header:
   [ <string>: [<secret>, ...] ] ]

@@ -3578,7 +3578,7 @@ tls_config:
 [ follow_redirects: <boolean> | default = true ]

 # Whether to enable HTTP2.
-[ enable_http2: <bool> | default: true ]
+[ enable_http2: <boolean> | default: true ]

 # Whether to use the external labels as selectors for the remote read endpoint.
 [ filter_external_labels: <boolean> | default = true ]
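The `<bool>` to `<boolean>` edits above only rename the documentation placeholder; the accepted values are unchanged. A minimal Go sketch of how such boolean fields behave when read from YAML, using a hypothetical local struct rather than the real types in github.com/prometheus/common/config:

```go
package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

// httpClientOptions is a hypothetical stand-in for the HTTP client section
// documented above; it is not the actual Prometheus configuration type.
type httpClientOptions struct {
	EnableHTTP2          bool `yaml:"enable_http2"`
	ProxyFromEnvironment bool `yaml:"proxy_from_environment"`
}

func main() {
	// Apply the documented defaults (enable_http2: true,
	// proxy_from_environment: false) before unmarshalling, so omitted
	// keys keep their default values.
	opts := httpClientOptions{EnableHTTP2: true}
	raw := []byte("proxy_from_environment: true\n")
	if err := yaml.Unmarshal(raw, &opts); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", opts) // {EnableHTTP2:true ProxyFromEnvironment:true}
}
```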
@@ -64,7 +64,7 @@ tls_server_config:
 # client's most preferred ciphersuite, or the server's most preferred
 # ciphersuite. If true then the server's preference, as expressed in
 # the order of elements in cipher_suites, is used.
-[ prefer_server_cipher_suites: <bool> | default = true ]
+[ prefer_server_cipher_suites: <boolean> | default = true ]

 # Elliptic curves that will be used in an ECDHE handshake, in preference
 # order. Available curves are documented in the go documentation:
@@ -1,6 +1,6 @@
 ---
 title: Feature flags
-sort_rank: 11
+sort_rank: 12
 ---

 # Feature flags
@@ -87,7 +87,7 @@ the following third-party contributions:

 ### Ansible

-* [Cloud Alchemy/ansible-prometheus](https://github.com/cloudalchemy/ansible-prometheus)
+* [prometheus-community/ansible](https://github.com/prometheus-community/ansible)

 ### Chef

@@ -1,6 +1,6 @@
 ---
 title: Migration
-sort_rank: 9
+sort_rank: 10
 ---

 # Prometheus 2.0 migration guide
@@ -157,9 +157,11 @@ syntax](https://github.com/google/re2/wiki/Syntax).

 Range vector literals work like instant vector literals, except that they
 select a range of samples back from the current instant. Syntactically, a [time
-duration](#time-durations) is appended in square brackets (`[]`) at the end of a
-vector selector to specify how far back in time values should be fetched for
-each resulting range vector element.
+duration](#time-durations) is appended in square brackets (`[]`) at the end of
+a vector selector to specify how far back in time values should be fetched for
+each resulting range vector element. The range is a closed interval,
+i.e. samples with timestamps coinciding with either boundary of the range are
+still included in the selection.

 In this example, we select all the values we have recorded within the last 5
 minutes for all time series that have the metric name `http_requests_total` and
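Stated symbolically, as a restatement of the closed-interval rule added above: a range selector of duration d evaluated at time t selects the samples

```latex
\{\,(t_s, v)\ \mid\ t - d \le t_s \le t\,\}
```

where t_s is a sample's timestamp; both boundary timestamps are included, which is exactly what "closed interval" means here.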
@@ -1,6 +1,6 @@
 ---
 title: API Stability
-sort_rank: 10
+sort_rank: 11
 ---

 # API Stability Guarantees
go.mod (2 lines changed)

@@ -1,6 +1,6 @@
 module github.com/prometheus/prometheus

-go 1.18
+go 1.19

 require (
 	github.com/Azure/azure-sdk-for-go v65.0.0+incompatible
@ -192,6 +192,30 @@ func (h *FloatHistogram) Scale(factor float64) *FloatHistogram {
//
// This method returns a pointer to the receiving histogram for convenience.
func (h *FloatHistogram) Add(other *FloatHistogram) *FloatHistogram {
    switch {
    case other.CounterResetHint == h.CounterResetHint:
        // Adding apples to apples, all good. No need to change anything.
    case h.CounterResetHint == GaugeType:
        // Adding something else to a gauge. That's probably OK. Outcome is a gauge.
        // Nothing to do since the receiver is already marked as gauge.
    case other.CounterResetHint == GaugeType:
        // Similar to before, but this time the receiver is "something else" and we have to change it to gauge.
        h.CounterResetHint = GaugeType
    case h.CounterResetHint == UnknownCounterReset:
        // With the receiver's CounterResetHint being "unknown", this could still be legitimate
        // if the caller knows what they are doing. Outcome is then again "unknown".
        // No need to do anything since the receiver's CounterResetHint is already "unknown".
    case other.CounterResetHint == UnknownCounterReset:
        // Similar to before, but now we have to set the receiver's CounterResetHint to "unknown".
        h.CounterResetHint = UnknownCounterReset
    default:
        // All other cases shouldn't actually happen.
        // They are a direct collision of CounterReset and NotCounterReset.
        // Conservatively set the CounterResetHint to "unknown" and issue a warning.
        h.CounterResetHint = UnknownCounterReset
        // TODO(trevorwhitney): Actually issue the warning as soon as the plumbing for it is in place
    }

    otherZeroCount := h.reconcileZeroBuckets(other)
    h.ZeroCount += otherZeroCount
    h.Count += other.Count
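As a usage sketch against the exported model/histogram API shown above (field values invented for illustration): adding a gauge histogram to a counter-style histogram flips the receiver's hint to gauge, per the third case of the switch.

package main

import (
    "fmt"

    "github.com/prometheus/prometheus/model/histogram"
)

func main() {
    counter := &histogram.FloatHistogram{
        CounterResetHint: histogram.NotCounterReset,
        Count:            10,
        Sum:              100,
    }
    gauge := &histogram.FloatHistogram{
        CounterResetHint: histogram.GaugeType,
        Count:            5,
        Sum:              50,
    }
    // other is a gauge, so the receiver becomes a gauge; Count and Sum accumulate.
    sum := counter.Add(gauge)
    fmt.Println(sum.CounterResetHint == histogram.GaugeType) // true
    fmt.Println(sum.Count, sum.Sum)                          // 15 150
}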
@ -414,6 +438,10 @@ func (h *FloatHistogram) Compact(maxEmptyBuckets int) *FloatHistogram {
// of observations, but NOT the sum of observations) is smaller in the receiving
// histogram compared to the previous histogram. Otherwise, it returns false.
//
// This method will shortcut to true if a CounterReset is detected, and shortcut
// to false if NotCounterReset is detected. Otherwise it will do the work to detect
// a reset.
//
// Special behavior in case the Schema or the ZeroThreshold are not the same in
// both histograms:
//

@ -432,12 +460,23 @@ func (h *FloatHistogram) Compact(maxEmptyBuckets int) *FloatHistogram {
// - Upon a decrease of the Schema, the buckets of the previous histogram are
//   merged so that they match the new, lower-resolution schema (again without
//   mutating the provided previous histogram).
//
// Note that this kind of reset detection is quite expensive. Ideally, resets
// are detected at ingest time and stored in the TSDB, so that the reset
// information can be read directly from there rather than be detected again
// each time.
func (h *FloatHistogram) DetectReset(previous *FloatHistogram) bool {
    if h.CounterResetHint == CounterReset {
        return true
    }
    if h.CounterResetHint == NotCounterReset {
        return false
    }
    // In all other cases of CounterResetHint (UnknownCounterReset and GaugeType),
    // we go on as we would otherwise, for reasons explained below.
    //
    // If the CounterResetHint is UnknownCounterReset, we do not know yet if this histogram comes
    // with a counter reset. Therefore, we have to do all the detailed work to find out if there
    // is a counter reset or not.
    // We do the same if the CounterResetHint is GaugeType, which should not happen, but PromQL still
    // allows the user to apply functions to gauge histograms that are only meant for counter histograms.
    // In this case, we treat the gauge histogram as a counter histogram
    // (and we plan to return a warning about it to the user).
    if h.Count < previous.Count {
        return true
    }
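A short hedged usage sketch of the shortcut behavior described above (invented values; only the code paths visible in this hunk are exercised):

package main

import (
    "fmt"

    "github.com/prometheus/prometheus/model/histogram"
)

func main() {
    prev := &histogram.FloatHistogram{Count: 100}
    curr := &histogram.FloatHistogram{Count: 200}

    // An explicit hint short-circuits: no bucket-level comparison is done.
    curr.CounterResetHint = histogram.CounterReset
    fmt.Println(curr.DetectReset(prev)) // true, despite the higher Count

    // With an unknown hint, the observation-count drop triggers detection.
    curr.CounterResetHint = histogram.UnknownCounterReset
    curr.Count = 50
    fmt.Println(curr.DetectReset(prev)) // true, Count decreased
}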
@ -545,9 +545,12 @@ func (b *Builder) Get(n string) string {
}

// Range calls f on each label in the Builder.
// If f calls Set or Del on b then this may affect what callbacks subsequently happen.
func (b *Builder) Range(f func(l Label)) {
    origAdd, origDel := b.add, b.del
    // Stack-based arrays to avoid heap allocation in most cases.
    var addStack [1024]Label
    var delStack [1024]string
    // Take a copy of add and del, so they are unaffected by calls to Set() or Del().
    origAdd, origDel := append(addStack[:0], b.add...), append(delStack[:0], b.del...)
    b.base.Range(func(l Label) {
        if !slices.Contains(origDel, l.Name) && !contains(origAdd, l.Name) {
            f(l)
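The general pattern behind this change, sketched with toy types rather than the real Builder: iterate over a private snapshot so that callbacks may mutate the underlying slices without affecting which elements the traversal visits.

package main

import "fmt"

type builder struct {
    del []string
}

// rangeDel calls f for each pending deletion, iterating a private copy
// so that f may safely append to b.del without affecting this loop.
func (b *builder) rangeDel(f func(name string)) {
    var stack [4]string // stack-based array to avoid heap allocation in most cases
    snapshot := append(stack[:0], b.del...)
    for _, n := range snapshot {
        f(n)
    }
}

func main() {
    b := &builder{del: []string{"aaa", "bbb"}}
    b.rangeDel(func(name string) {
        b.del = append(b.del, name+"-again") // mutation during iteration
        fmt.Println("visited", name)
    })
    fmt.Println(b.del) // the appends happened, but only aaa and bbb were visited
}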
@ -599,9 +599,12 @@ func (b *Builder) Get(n string) string {
}

// Range calls f on each label in the Builder.
// If f calls Set or Del on b then this may affect what callbacks subsequently happen.
func (b *Builder) Range(f func(l Label)) {
    origAdd, origDel := b.add, b.del
    // Stack-based arrays to avoid heap allocation in most cases.
    var addStack [1024]Label
    var delStack [1024]string
    // Take a copy of add and del, so they are unaffected by calls to Set() or Del().
    origAdd, origDel := append(addStack[:0], b.add...), append(delStack[:0], b.del...)
    b.base.Range(func(l Label) {
        if !slices.Contains(origDel, l.Name) && !contains(origAdd, l.Name) {
            f(l)
@ -529,6 +529,11 @@ func TestBuilder(t *testing.T) {
            base: FromStrings("aaa", "111"),
            want: FromStrings("aaa", "111"),
        },
        {
            base: FromStrings("aaa", "111", "bbb", "222", "ccc", "333"),
            set:  []Label{{"aaa", "444"}, {"bbb", "555"}, {"ccc", "666"}},
            want: FromStrings("aaa", "444", "bbb", "555", "ccc", "666"),
        },
        {
            base: FromStrings("aaa", "111", "bbb", "222", "ccc", "333"),
            del:  []string{"bbb"},

@ -591,7 +596,15 @@ func TestBuilder(t *testing.T) {
                b.Keep(tcase.keep...)
            }
            b.Del(tcase.del...)
            require.Equal(t, tcase.want, b.Labels(tcase.base))
            require.Equal(t, tcase.want, b.Labels(EmptyLabels()))

            // Check what happens when we call Range and mutate the builder.
            b.Range(func(l Label) {
                if l.Name == "aaa" || l.Name == "bbb" {
                    b.Del(l.Name)
                }
            })
            require.Equal(t, tcase.want.BytesWithoutLabels(nil, "aaa", "bbb"), b.Labels(tcase.base).Bytes(nil))
        })
    }
}
@ -134,21 +134,24 @@ func (LabelMatcher_Type) EnumDescriptor() ([]byte, []int) {
type Chunk_Encoding int32

const (
    Chunk_UNKNOWN   Chunk_Encoding = 0
    Chunk_XOR       Chunk_Encoding = 1
    Chunk_HISTOGRAM Chunk_Encoding = 2
    Chunk_UNKNOWN         Chunk_Encoding = 0
    Chunk_XOR             Chunk_Encoding = 1
    Chunk_HISTOGRAM       Chunk_Encoding = 2
    Chunk_FLOAT_HISTOGRAM Chunk_Encoding = 3
)

var Chunk_Encoding_name = map[int32]string{
    0: "UNKNOWN",
    1: "XOR",
    2: "HISTOGRAM",
    3: "FLOAT_HISTOGRAM",
}

var Chunk_Encoding_value = map[string]int32{
    "UNKNOWN":   0,
    "XOR":       1,
    "HISTOGRAM": 2,
    "UNKNOWN":         0,
    "XOR":             1,
    "HISTOGRAM":       2,
    "FLOAT_HISTOGRAM": 3,
}

func (x Chunk_Encoding) String() string {
@ -1143,75 +1146,76 @@ func init() {
func init() { proto.RegisterFile("types.proto", fileDescriptor_d938547f84707355) }

var fileDescriptor_d938547f84707355 = []byte{
    // 1081 bytes of a gzipped FileDescriptorProto
    // 1092 bytes of a gzipped FileDescriptorProto
    // (generated gzipped descriptor bytes omitted; not human-readable)
}

func (m *MetricMetadata) Marshal() (dAtA []byte, err error) {
@ -169,9 +169,10 @@ message Chunk {

  // We require this to match chunkenc.Encoding.
  enum Encoding {
    UNKNOWN   = 0;
    XOR       = 1;
    HISTOGRAM = 2;
    UNKNOWN         = 0;
    XOR             = 1;
    HISTOGRAM       = 2;
    FLOAT_HISTOGRAM = 3;
  }
  Encoding type = 3;
  bytes data = 4;
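Since the comment requires this enum to track chunkenc.Encoding, a test along these lines could pin the correspondence. This is a sketch under the assumption that the chunkenc encodings are numbered in the same order; it is not code from the repository.

package prompb_test

import (
    "testing"

    "github.com/prometheus/prometheus/prompb"
    "github.com/prometheus/prometheus/tsdb/chunkenc"
)

// TestChunkEncodingMatchesProto pins the numeric correspondence the proto
// comment asks for; if either side is renumbered, this test fails.
func TestChunkEncodingMatchesProto(t *testing.T) {
    pairs := []struct {
        enc   chunkenc.Encoding
        proto prompb.Chunk_Encoding
    }{
        {chunkenc.EncNone, prompb.Chunk_UNKNOWN},
        {chunkenc.EncXOR, prompb.Chunk_XOR},
        {chunkenc.EncHistogram, prompb.Chunk_HISTOGRAM},
        {chunkenc.EncFloatHistogram, prompb.Chunk_FLOAT_HISTOGRAM},
    }
    for _, p := range pairs {
        if int32(p.enc) != int32(p.proto) {
            t.Errorf("chunkenc %v = %d, proto %v = %d", p.enc, p.enc, p.proto, p.proto)
        }
    }
}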
@ -3181,8 +3181,7 @@ func TestNativeHistogramRate(t *testing.T) {
    require.Len(t, vector, 1)
    actualHistogram := vector[0].H
    expectedHistogram := &histogram.FloatHistogram{
        // TODO(beorn7): This should be GaugeType. Change it once supported by PromQL.
        CounterResetHint: histogram.NotCounterReset,
        CounterResetHint: histogram.GaugeType,
        Schema:           1,
        ZeroThreshold:    0.001,
        ZeroCount:        1. / 15.,

@ -3226,8 +3225,7 @@ func TestNativeFloatHistogramRate(t *testing.T) {
    require.Len(t, vector, 1)
    actualHistogram := vector[0].H
    expectedHistogram := &histogram.FloatHistogram{
        // TODO(beorn7): This should be GaugeType. Change it once supported by PromQL.
        CounterResetHint: histogram.NotCounterReset,
        CounterResetHint: histogram.GaugeType,
        Schema:           1,
        ZeroThreshold:    0.001,
        ZeroCount:        1. / 15.,
@ -187,6 +187,7 @@ func histogramRate(points []Point, isCounter bool) *histogram.FloatHistogram {
        if curr == nil {
            return nil // Range contains a mix of histograms and floats.
        }
        // TODO(trevorwhitney): Check if isCounter is consistent with curr.CounterResetHint.
        if !isCounter {
            continue
        }

@ -208,6 +209,8 @@ func histogramRate(points []Point, isCounter bool) *histogram.FloatHistogram {
            prev = curr
        }
    }

    h.CounterResetHint = histogram.GaugeType
    return h.Compact(0)
}
@ -31,7 +31,6 @@ import (
    "go.opentelemetry.io/otel/attribute"
    "go.opentelemetry.io/otel/codes"

    "github.com/prometheus/prometheus/model/histogram"
    "github.com/prometheus/prometheus/model/labels"
    "github.com/prometheus/prometheus/model/rulefmt"
    "github.com/prometheus/prometheus/model/timestamp"

@ -688,9 +687,6 @@ func (g *Group) Eval(ctx context.Context, ts time.Time) {

        for _, s := range vector {
            if s.H != nil {
                // We assume that all native histogram results are gauge histograms.
                // TODO(codesome): once PromQL can give the counter reset info, remove this assumption.
                s.H.CounterResetHint = histogram.GaugeType
                _, err = app.AppendHistogram(0, s.Metric, s.T, nil, s.H)
            } else {
                _, err = app.Append(0, s.Metric, s.T, s.V)
@ -32,7 +32,6 @@ import (
    "go.uber.org/goleak"
    "gopkg.in/yaml.v2"

    "github.com/prometheus/prometheus/model/histogram"
    "github.com/prometheus/prometheus/model/labels"
    "github.com/prometheus/prometheus/model/rulefmt"
    "github.com/prometheus/prometheus/model/timestamp"

@ -1860,7 +1859,6 @@ func TestNativeHistogramsInRecordingRules(t *testing.T) {
    for _, h := range hists[1:] {
        expHist = expHist.Add(h.ToFloat())
    }
    expHist.CounterResetHint = histogram.GaugeType

    it := s.Iterator(nil)
    require.Equal(t, chunkenc.ValFloatHistogram, it.Next())
@ -80,7 +80,7 @@ func init() {
// Client allows reading and writing from/to a remote HTTP endpoint.
type Client struct {
    remoteName string // Used to differentiate clients in metrics.
    url        *config_util.URL
    urlString  string // url.String()
    Client     *http.Client
    timeout    time.Duration

@ -122,7 +122,7 @@ func NewReadClient(name string, conf *ClientConfig) (ReadClient, error) {

    return &Client{
        remoteName:  name,
        url:         conf.URL,
        urlString:   conf.URL.String(),
        Client:      httpClient,
        timeout:     time.Duration(conf.Timeout),
        readQueries: remoteReadQueries.WithLabelValues(name, conf.URL.String()),

@ -154,7 +154,7 @@ func NewWriteClient(name string, conf *ClientConfig) (WriteClient, error) {

    return &Client{
        remoteName:       name,
        url:              conf.URL,
        urlString:        conf.URL.String(),
        Client:           httpClient,
        retryOnRateLimit: conf.RetryOnRateLimit,
        timeout:          time.Duration(conf.Timeout),

@ -187,7 +187,7 @@ type RecoverableError struct {
// Store sends a batch of samples to the HTTP endpoint, the request is the proto marshalled
// and encoded bytes from codec.go.
func (c *Client) Store(ctx context.Context, req []byte) error {
    httpReq, err := http.NewRequest("POST", c.url.String(), bytes.NewReader(req))
    httpReq, err := http.NewRequest("POST", c.urlString, bytes.NewReader(req))
    if err != nil {
        // Errors from NewRequest are from unparsable URLs, so are not
        // recoverable.

@ -255,7 +255,7 @@ func (c Client) Name() string {

// Endpoint is the remote read or write endpoint.
func (c Client) Endpoint() string {
    return c.url.String()
    return c.urlString
}

// Read reads from a remote endpoint.

@ -276,7 +276,7 @@ func (c *Client) Read(ctx context.Context, query *prompb.Query) (*prompb.QueryRe
    }

    compressed := snappy.Encode(nil, data)
    httpReq, err := http.NewRequest("POST", c.url.String(), bytes.NewReader(compressed))
    httpReq, err := http.NewRequest("POST", c.urlString, bytes.NewReader(compressed))
    if err != nil {
        return nil, fmt.Errorf("unable to create request: %w", err)
    }

@ -310,7 +310,7 @@ func (c *Client) Read(ctx context.Context, query *prompb.Query) (*prompb.QueryRe
    }

    if httpResp.StatusCode/100 != 2 {
        return nil, fmt.Errorf("remote server %s returned HTTP status %s: %s", c.url.String(), httpResp.Status, strings.TrimSpace(string(compressed)))
        return nil, fmt.Errorf("remote server %s returned HTTP status %s: %s", c.urlString, httpResp.Status, strings.TrimSpace(string(compressed)))
    }

    uncompressed, err := snappy.Decode(nil, compressed)
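A hedged reading of the motivation: url.String() re-serializes the URL on every call, so caching the result once in the constructor removes that work from the hot Store/Read paths. The pattern in isolation (toy type, not the actual Client):

package main

import (
    "fmt"
    "net/url"
)

type client struct {
    url       *url.URL
    urlString string // cached url.URL.String()
}

func newClient(raw string) (*client, error) {
    u, err := url.Parse(raw)
    if err != nil {
        return nil, err
    }
    // Compute the string form once; every request reuses it.
    return &client{url: u, urlString: u.String()}, nil
}

func main() {
    c, err := newClient("http://localhost:9090/api/v1/write")
    if err != nil {
        panic(err)
    }
    fmt.Println(c.urlString) // no repeated String() calls on the hot path
}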
@ -120,10 +120,13 @@ func ToQueryResult(ss storage.SeriesSet, sampleLimit int) (*prompb.QueryResult,
    for ss.Next() {
        series := ss.At()
        iter = series.Iterator(iter)
        samples := []prompb.Sample{}

        for iter.Next() == chunkenc.ValFloat {
            // TODO(beorn7): Add Histogram support.
        var (
            samples    []prompb.Sample
            histograms []prompb.Histogram
        )

        for valType := iter.Next(); valType != chunkenc.ValNone; valType = iter.Next() {
            numSamples++
            if sampleLimit > 0 && numSamples > sampleLimit {
                return nil, ss.Warnings(), HTTPError{

@ -131,19 +134,32 @@ func ToQueryResult(ss storage.SeriesSet, sampleLimit int) (*prompb.QueryResult,
                    status: http.StatusBadRequest,
                }
            }
            ts, val := iter.At()
            samples = append(samples, prompb.Sample{
                Timestamp: ts,
                Value:     val,
            })

            switch valType {
            case chunkenc.ValFloat:
                ts, val := iter.At()
                samples = append(samples, prompb.Sample{
                    Timestamp: ts,
                    Value:     val,
                })
            case chunkenc.ValHistogram:
                ts, h := iter.AtHistogram()
                histograms = append(histograms, HistogramToHistogramProto(ts, h))
            case chunkenc.ValFloatHistogram:
                ts, fh := iter.AtFloatHistogram()
                histograms = append(histograms, FloatHistogramToHistogramProto(ts, fh))
            default:
                return nil, ss.Warnings(), fmt.Errorf("unrecognized value type: %s", valType)
            }
        }
        if err := iter.Err(); err != nil {
            return nil, ss.Warnings(), err
        }

        resp.Timeseries = append(resp.Timeseries, &prompb.TimeSeries{
            Labels:  labelsToLabelsProto(series.Labels(), nil),
            Samples: samples,
            Labels:     labelsToLabelsProto(series.Labels(), nil),
            Samples:    samples,
            Histograms: histograms,
        })
    }
    return resp, ss.Warnings(), ss.Err()
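The loop above follows the general chunkenc iteration idiom: advance until ValNone and dispatch on the reported value type. A self-contained sketch of the same idiom over an in-memory XOR chunk (floats only; the histogram branches would mirror the switch above):

package main

import (
    "fmt"

    "github.com/prometheus/prometheus/tsdb/chunkenc"
)

func main() {
    chk := chunkenc.NewXORChunk()
    app, err := chk.Appender()
    if err != nil {
        panic(err)
    }
    app.Append(1000, 1.5)
    app.Append(2000, 2.5)

    it := chk.Iterator(nil)
    for valType := it.Next(); valType != chunkenc.ValNone; valType = it.Next() {
        switch valType {
        case chunkenc.ValFloat:
            ts, v := it.At()
            fmt.Println(ts, v)
        default:
            // ValHistogram / ValFloatHistogram would be handled here.
            fmt.Println("unexpected value type:", valType)
        }
    }
    if err := it.Err(); err != nil {
        panic(err)
    }
}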
File diff suppressed because one or more lines are too long
@ -171,12 +171,12 @@ func TestSeriesSetFilter(t *testing.T) {
            toRemove: []string{"foo"},
            in: &prompb.QueryResult{
                Timeseries: []*prompb.TimeSeries{
                    {Labels: labelsToLabelsProto(labels.FromStrings("foo", "bar", "a", "b"), nil), Samples: []prompb.Sample{}},
                    {Labels: labelsToLabelsProto(labels.FromStrings("foo", "bar", "a", "b"), nil)},
                },
            },
            expected: &prompb.QueryResult{
                Timeseries: []*prompb.TimeSeries{
                    {Labels: labelsToLabelsProto(labels.FromStrings("a", "b"), nil), Samples: []prompb.Sample{}},
                    {Labels: labelsToLabelsProto(labels.FromStrings("a", "b"), nil)},
                },
            },
        },
@ -303,6 +303,7 @@ func Open(l log.Logger, reg prometheus.Registerer, rs *remote.Storage, dir strin
        if err := w.Repair(err); err != nil {
            return nil, errors.Wrap(err, "repair corrupted WAL")
        }
        level.Info(db.logger).Log("msg", "successfully repaired WAL")
    }

    go db.run()
@ -47,20 +47,9 @@ func (e Encoding) String() string {
    return "<unknown>"
}

// Chunk encodings for out-of-order chunks.
// These encodings must be only used by the Head block for its internal bookkeeping.
const (
    OutOfOrderMask = 0b10000000
    EncOOOXOR      = EncXOR | OutOfOrderMask
)

func IsOutOfOrderChunk(e Encoding) bool {
    return (e & OutOfOrderMask) != 0
}

// IsValidEncoding returns true for supported encodings.
func IsValidEncoding(e Encoding) bool {
    return e == EncXOR || e == EncOOOXOR || e == EncHistogram || e == EncFloatHistogram
    return e == EncXOR || e == EncHistogram || e == EncFloatHistogram
}

// Chunk holds a sequence of sample pairs that can be iterated over and appended to.
@ -262,7 +251,7 @@ func NewPool() Pool {

func (p *pool) Get(e Encoding, b []byte) (Chunk, error) {
    switch e {
    case EncXOR, EncOOOXOR:
    case EncXOR:
        c := p.xor.Get().(*XORChunk)
        c.b.stream = b
        c.b.count = 0

@ -283,7 +272,7 @@ func (p *pool) Get(e Encoding, b []byte) (Chunk, error) {

func (p *pool) Put(c Chunk) error {
    switch c.Encoding() {
    case EncXOR, EncOOOXOR:
    case EncXOR:
        xc, ok := c.(*XORChunk)
        // This may happen often with wrapped chunks. Nothing we can really do about
        // it but returning an error would cause a lot of allocations again. Thus,

@ -327,7 +316,7 @@ func (p *pool) Put(c Chunk) error {
// bytes.
func FromData(e Encoding, d []byte) (Chunk, error) {
    switch e {
    case EncXOR, EncOOOXOR:
    case EncXOR:
        return &XORChunk{b: bstream{count: 0, stream: d}}, nil
    case EncHistogram:
        return &HistogramChunk{b: bstream{count: 0, stream: d}}, nil
@ -506,12 +506,3 @@ func xorRead(br *bstreamReader, value *float64, leading, trailing *uint8) error
    *value = math.Float64frombits(vbits)
    return nil
}

// OOOXORChunk holds a XORChunk and overrides the Encoding() method.
type OOOXORChunk struct {
    *XORChunk
}

func (c *OOOXORChunk) Encoding() Encoding {
    return EncOOOXOR
}
@ -42,6 +42,7 @@ type chunkWriteJob struct {
    maxt     int64
    chk      chunkenc.Chunk
    ref      ChunkDiskMapperRef
    isOOO    bool
    callback func(error)
}

@ -76,7 +77,7 @@ type chunkWriteQueue struct {
}

// writeChunkF is a function which writes chunks; it is dynamic to allow mocking in tests.
type writeChunkF func(HeadSeriesRef, int64, int64, chunkenc.Chunk, ChunkDiskMapperRef, bool) error
type writeChunkF func(HeadSeriesRef, int64, int64, chunkenc.Chunk, ChunkDiskMapperRef, bool, bool) error

func newChunkWriteQueue(reg prometheus.Registerer, size int, writeChunk writeChunkF) *chunkWriteQueue {
    counters := prometheus.NewCounterVec(

@ -133,7 +134,7 @@ func (c *chunkWriteQueue) start() {
}

func (c *chunkWriteQueue) processJob(job chunkWriteJob) {
    err := c.writeChunk(job.seriesRef, job.mint, job.maxt, job.chk, job.ref, job.cutFile)
    err := c.writeChunk(job.seriesRef, job.mint, job.maxt, job.chk, job.ref, job.isOOO, job.cutFile)
    if job.callback != nil {
        job.callback(err)
    }
@ -31,7 +31,7 @@ func TestChunkWriteQueue_GettingChunkFromQueue(t *testing.T) {
    blockWriterWg.Add(1)

    // blockingChunkWriter blocks until blockWriterWg is done.
    blockingChunkWriter := func(_ HeadSeriesRef, _, _ int64, _ chunkenc.Chunk, _ ChunkDiskMapperRef, _ bool) error {
    blockingChunkWriter := func(_ HeadSeriesRef, _, _ int64, _ chunkenc.Chunk, _ ChunkDiskMapperRef, _, _ bool) error {
        blockWriterWg.Wait()
        return nil
    }

@ -63,7 +63,7 @@ func TestChunkWriteQueue_WritingThroughQueue(t *testing.T) {
        gotCutFile bool
    )

    blockingChunkWriter := func(seriesRef HeadSeriesRef, mint, maxt int64, chunk chunkenc.Chunk, ref ChunkDiskMapperRef, cutFile bool) error {
    blockingChunkWriter := func(seriesRef HeadSeriesRef, mint, maxt int64, chunk chunkenc.Chunk, ref ChunkDiskMapperRef, isOOO, cutFile bool) error {
        gotSeriesRef = seriesRef
        gotMint = mint
        gotMaxt = maxt

@ -101,7 +101,7 @@ func TestChunkWriteQueue_WrappingAroundSizeLimit(t *testing.T) {
    unblockChunkWriterCh := make(chan struct{}, sizeLimit)

    // blockingChunkWriter blocks until the unblockChunkWriterCh channel returns a value.
    blockingChunkWriter := func(seriesRef HeadSeriesRef, mint, maxt int64, chunk chunkenc.Chunk, ref ChunkDiskMapperRef, cutFile bool) error {
    blockingChunkWriter := func(seriesRef HeadSeriesRef, mint, maxt int64, chunk chunkenc.Chunk, ref ChunkDiskMapperRef, isOOO, cutFile bool) error {
        <-unblockChunkWriterCh
        return nil
    }

@ -183,7 +183,7 @@ func TestChunkWriteQueue_WrappingAroundSizeLimit(t *testing.T) {

func TestChunkWriteQueue_HandlerErrorViaCallback(t *testing.T) {
    testError := errors.New("test error")
    chunkWriter := func(_ HeadSeriesRef, _, _ int64, _ chunkenc.Chunk, _ ChunkDiskMapperRef, _ bool) error {
    chunkWriter := func(_ HeadSeriesRef, _, _ int64, _ chunkenc.Chunk, _ ChunkDiskMapperRef, _, _ bool) error {
        return testError
    }

@ -211,7 +211,7 @@ func BenchmarkChunkWriteQueue_addJob(b *testing.B) {
    for _, concurrentWrites := range []int{1, 10, 100, 1000} {
        b.Run(fmt.Sprintf("%d concurrent writes", concurrentWrites), func(b *testing.B) {
            issueReadSignal := make(chan struct{})
            q := newChunkWriteQueue(nil, 1000, func(ref HeadSeriesRef, i, i2 int64, chunk chunkenc.Chunk, ref2 ChunkDiskMapperRef, b bool) error {
            q := newChunkWriteQueue(nil, 1000, func(ref HeadSeriesRef, i, i2 int64, chunk chunkenc.Chunk, ref2 ChunkDiskMapperRef, ooo, b bool) error {
                if withReads {
                    select {
                    case issueReadSignal <- struct{}{}:
@ -273,6 +273,26 @@ func NewChunkDiskMapper(reg prometheus.Registerer, dir string, pool chunkenc.Poo
    return m, m.openMMapFiles()
}

// Chunk encodings for out-of-order chunks.
// These encodings must be only used by the Head block for its internal bookkeeping.
const (
    OutOfOrderMask = uint8(0b10000000)
)

func (cdm *ChunkDiskMapper) ApplyOutOfOrderMask(sourceEncoding chunkenc.Encoding) chunkenc.Encoding {
    enc := uint8(sourceEncoding) | OutOfOrderMask
    return chunkenc.Encoding(enc)
}

func (cdm *ChunkDiskMapper) IsOutOfOrderChunk(e chunkenc.Encoding) bool {
    return (uint8(e) & OutOfOrderMask) != 0
}

func (cdm *ChunkDiskMapper) RemoveMasks(sourceEncoding chunkenc.Encoding) chunkenc.Encoding {
    restored := uint8(sourceEncoding) & (^OutOfOrderMask)
    return chunkenc.Encoding(restored)
}

// openMMapFiles opens all files within dir for mmapping.
func (cdm *ChunkDiskMapper) openMMapFiles() (returnErr error) {
    cdm.mmappedChunkFiles = map[int]*mmappedChunkFile{}
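The mask arithmetic above is easy to check in isolation. A minimal self-contained sketch using plain uint8 values (not the real chunkenc.Encoding type): setting, testing, and clearing the high bit round-trips the original encoding byte.

package main

import "fmt"

const outOfOrderMask = uint8(0b10000000)

func main() {
    encXOR := uint8(1) // stands in for chunkenc.EncXOR

    masked := encXOR | outOfOrderMask       // apply the OOO marker
    isOOO := (masked & outOfOrderMask) != 0 // test it
    restored := masked & ^outOfOrderMask    // remove it again

    fmt.Printf("masked=%08b isOOO=%v restored=%d\n", masked, isOOO, restored)
    // Output: masked=10000001 isOOO=true restored=1
}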
@ -403,17 +423,17 @@ func repairLastChunkFile(files map[int]string) (_ map[int]string, returnErr erro

// WriteChunk writes the chunk to the disk.
// The returned chunk ref is the reference from where the chunk encoding starts for the chunk.
func (cdm *ChunkDiskMapper) WriteChunk(seriesRef HeadSeriesRef, mint, maxt int64, chk chunkenc.Chunk, callback func(err error)) (chkRef ChunkDiskMapperRef) {
func (cdm *ChunkDiskMapper) WriteChunk(seriesRef HeadSeriesRef, mint, maxt int64, chk chunkenc.Chunk, isOOO bool, callback func(err error)) (chkRef ChunkDiskMapperRef) {
    // cdm.evtlPosMtx must be held to serialize the calls to cdm.evtlPos.getNextChunkRef() and the writing of the chunk (either with or without queue).
    cdm.evtlPosMtx.Lock()
    defer cdm.evtlPosMtx.Unlock()
    ref, cutFile := cdm.evtlPos.getNextChunkRef(chk)

    if cdm.writeQueue != nil {
        return cdm.writeChunkViaQueue(ref, cutFile, seriesRef, mint, maxt, chk, callback)
        return cdm.writeChunkViaQueue(ref, isOOO, cutFile, seriesRef, mint, maxt, chk, callback)
    }

    err := cdm.writeChunk(seriesRef, mint, maxt, chk, ref, cutFile)
    err := cdm.writeChunk(seriesRef, mint, maxt, chk, ref, isOOO, cutFile)
    if callback != nil {
        callback(err)
    }
@ -421,7 +441,7 @@ func (cdm *ChunkDiskMapper) WriteChunk(seriesRef HeadSeriesRef, mint, maxt int64
    return ref
}

func (cdm *ChunkDiskMapper) writeChunkViaQueue(ref ChunkDiskMapperRef, cutFile bool, seriesRef HeadSeriesRef, mint, maxt int64, chk chunkenc.Chunk, callback func(err error)) (chkRef ChunkDiskMapperRef) {
func (cdm *ChunkDiskMapper) writeChunkViaQueue(ref ChunkDiskMapperRef, isOOO, cutFile bool, seriesRef HeadSeriesRef, mint, maxt int64, chk chunkenc.Chunk, callback func(err error)) (chkRef ChunkDiskMapperRef) {
    var err error
    if callback != nil {
        defer func() {

@ -438,13 +458,14 @@ func (cdm *ChunkDiskMapper) writeChunkViaQueue(ref ChunkDiskMapperRef, cutFile b
        maxt:     maxt,
        chk:      chk,
        ref:      ref,
        isOOO:    isOOO,
        callback: callback,
    })

    return ref
}

func (cdm *ChunkDiskMapper) writeChunk(seriesRef HeadSeriesRef, mint, maxt int64, chk chunkenc.Chunk, ref ChunkDiskMapperRef, cutFile bool) (err error) {
func (cdm *ChunkDiskMapper) writeChunk(seriesRef HeadSeriesRef, mint, maxt int64, chk chunkenc.Chunk, ref ChunkDiskMapperRef, isOOO, cutFile bool) (err error) {
    cdm.writePathMtx.Lock()
    defer cdm.writePathMtx.Unlock()
@ -476,7 +497,11 @@ func (cdm *ChunkDiskMapper) writeChunk(seriesRef HeadSeriesRef, mint, maxt int64
    bytesWritten += MintMaxtSize
    binary.BigEndian.PutUint64(cdm.byteBuf[bytesWritten:], uint64(maxt))
    bytesWritten += MintMaxtSize
    cdm.byteBuf[bytesWritten] = byte(chk.Encoding())
    enc := chk.Encoding()
    if isOOO {
        enc = cdm.ApplyOutOfOrderMask(enc)
    }
    cdm.byteBuf[bytesWritten] = byte(enc)
    bytesWritten += ChunkEncodingSize
    n := binary.PutUvarint(cdm.byteBuf[bytesWritten:], uint64(len(chk.Bytes())))
    bytesWritten += n

@ -697,7 +722,9 @@ func (cdm *ChunkDiskMapper) Chunk(ref ChunkDiskMapperRef) (chunkenc.Chunk, error

    // Encoding.
    chkEnc := mmapFile.byteSlice.Range(chkStart, chkStart+ChunkEncodingSize)[0]

    sourceChkEnc := chunkenc.Encoding(chkEnc)
    // Extract the encoding from the byte. ChunkDiskMapper uses only the last 7 bits for the encoding.
    chkEnc = byte(cdm.RemoveMasks(sourceChkEnc))
    // Data length.
    // With the minimum chunk length this should never cause us reading
    // over the end of the slice.

@ -763,7 +790,7 @@ func (cdm *ChunkDiskMapper) Chunk(ref ChunkDiskMapperRef) (chunkenc.Chunk, error
// and runs the provided function with information about each chunk. It returns on the first error encountered.
// NOTE: This method needs to be called at least once after creating ChunkDiskMapper
// to set the maxt of all the files.
func (cdm *ChunkDiskMapper) IterateAllChunks(f func(seriesRef HeadSeriesRef, chunkRef ChunkDiskMapperRef, mint, maxt int64, numSamples uint16, encoding chunkenc.Encoding) error) (err error) {
func (cdm *ChunkDiskMapper) IterateAllChunks(f func(seriesRef HeadSeriesRef, chunkRef ChunkDiskMapperRef, mint, maxt int64, numSamples uint16, encoding chunkenc.Encoding, isOOO bool) error) (err error) {
    cdm.writePathMtx.Lock()
    defer cdm.writePathMtx.Unlock()

@ -861,8 +888,10 @@ func (cdm *ChunkDiskMapper) IterateAllChunks(f func(seriesRef HeadSeriesRef, chu
        if maxt > mmapFile.maxt {
            mmapFile.maxt = maxt
        }

        if err := f(seriesRef, chunkRef, mint, maxt, numSamples, chkEnc); err != nil {
        isOOO := cdm.IsOutOfOrderChunk(chkEnc)
        // Extract the encoding from the byte. ChunkDiskMapper uses only the last 7 bits for the encoding.
        chkEnc = cdm.RemoveMasks(chkEnc)
        if err := f(seriesRef, chunkRef, mint, maxt, numSamples, chkEnc, isOOO); err != nil {
            if cerr, ok := err.(*CorruptionErr); ok {
                cerr.Dir = cdm.dir.Name()
                cerr.FileIndex = segID
@ -98,7 +98,11 @@ func TestChunkDiskMapper_WriteChunk_Chunk_IterateChunks(t *testing.T) {
    bytesWritten += MintMaxtSize
    binary.BigEndian.PutUint64(buf[bytesWritten:], uint64(maxt))
    bytesWritten += MintMaxtSize
    buf[bytesWritten] = byte(chunk.Encoding())
    enc := chunk.Encoding()
    if isOOO {
        enc = hrw.ApplyOutOfOrderMask(enc)
    }
    buf[bytesWritten] = byte(enc)
    bytesWritten += ChunkEncodingSize
    n := binary.PutUvarint(buf[bytesWritten:], uint64(len(chunk.Bytes())))
    bytesWritten += n

@ -149,7 +153,7 @@ func TestChunkDiskMapper_WriteChunk_Chunk_IterateChunks(t *testing.T) {
    hrw = createChunkDiskMapper(t, dir)

    idx := 0
    require.NoError(t, hrw.IterateAllChunks(func(seriesRef HeadSeriesRef, chunkRef ChunkDiskMapperRef, mint, maxt int64, numSamples uint16, encoding chunkenc.Encoding) error {
    require.NoError(t, hrw.IterateAllChunks(func(seriesRef HeadSeriesRef, chunkRef ChunkDiskMapperRef, mint, maxt int64, numSamples uint16, encoding chunkenc.Encoding, isOOO bool) error {
        t.Helper()

        expData := expectedData[idx]

@ -158,7 +162,7 @@ func TestChunkDiskMapper_WriteChunk_Chunk_IterateChunks(t *testing.T) {
        require.Equal(t, expData.mint, mint)
        require.Equal(t, expData.maxt, maxt)
        require.Equal(t, expData.numSamples, numSamples)
        require.Equal(t, expData.isOOO, chunkenc.IsOutOfOrderChunk(encoding))
        require.Equal(t, expData.isOOO, isOOO)

        actChunk, err := hrw.Chunk(expData.chunkRef)
        require.NoError(t, err)

@ -187,7 +191,7 @@ func TestChunkDiskMapper_WriteUnsupportedChunk_Chunk_IterateChunks(t *testing.T)
    require.NoError(t, hrw.Close())
    hrw = createChunkDiskMapper(t, dir)

    require.NoError(t, hrw.IterateAllChunks(func(seriesRef HeadSeriesRef, chunkRef ChunkDiskMapperRef, mint, maxt int64, numSamples uint16, encoding chunkenc.Encoding) error {
    require.NoError(t, hrw.IterateAllChunks(func(seriesRef HeadSeriesRef, chunkRef ChunkDiskMapperRef, mint, maxt int64, numSamples uint16, encoding chunkenc.Encoding, isOOO bool) error {
        t.Helper()

        require.Equal(t, ucSeriesRef, seriesRef)

@ -226,7 +230,7 @@ func TestChunkDiskMapper_Truncate(t *testing.T) {
        mint, maxt := timeRange+1, timeRange+step-1
        var err error
        awaitCb := make(chan struct{})
        hrw.WriteChunk(1, int64(mint), int64(maxt), randomChunk(t), func(cbErr error) {
        hrw.WriteChunk(1, int64(mint), int64(maxt), randomChunk(t), false, func(cbErr error) {
            err = cbErr
            close(awaitCb)
        })

@ -320,7 +324,7 @@ func TestChunkDiskMapper_Truncate_PreservesFileSequence(t *testing.T) {

        step := 100
        mint, maxt := timeRange+1, timeRange+step-1
        hrw.WriteChunk(1, int64(mint), int64(maxt), randomChunk(t), func(err error) {
        hrw.WriteChunk(1, int64(mint), int64(maxt), randomChunk(t), false, func(err error) {
            close(awaitCb)
            require.NoError(t, err)
        })

@ -401,7 +405,7 @@ func TestHeadReadWriter_TruncateAfterFailedIterateChunks(t *testing.T) {
    // Write a chunk to iterate on it later.
    var err error
    awaitCb := make(chan struct{})
    hrw.WriteChunk(1, 0, 1000, randomChunk(t), func(cbErr error) {
    hrw.WriteChunk(1, 0, 1000, randomChunk(t), false, func(cbErr error) {
        err = cbErr
        close(awaitCb)
    })

@ -415,7 +419,7 @@ func TestHeadReadWriter_TruncateAfterFailedIterateChunks(t *testing.T) {
    hrw = createChunkDiskMapper(t, dir)

    // Forcefully failing IterateAllChunks.
    require.Error(t, hrw.IterateAllChunks(func(_ HeadSeriesRef, _ ChunkDiskMapperRef, _, _ int64, _ uint16, _ chunkenc.Encoding) error {
    require.Error(t, hrw.IterateAllChunks(func(_ HeadSeriesRef, _ ChunkDiskMapperRef, _, _ int64, _ uint16, _ chunkenc.Encoding, _ bool) error {
        return errors.New("random error")
    }))

@ -434,7 +438,7 @@ func TestHeadReadWriter_ReadRepairOnEmptyLastFile(t *testing.T) {
        mint, maxt := timeRange+1, timeRange+step-1
        var err error
        awaitCb := make(chan struct{})
        hrw.WriteChunk(1, int64(mint), int64(maxt), randomChunk(t), func(cbErr error) {
        hrw.WriteChunk(1, int64(mint), int64(maxt), randomChunk(t), false, func(cbErr error) {
            err = cbErr
            close(awaitCb)
        })

@ -527,7 +531,7 @@ func createChunkDiskMapper(t *testing.T, dir string) *ChunkDiskMapper {
    hrw, err := NewChunkDiskMapper(nil, dir, chunkenc.NewPool(), DefaultWriteBufferSize, writeQueueSize)
    require.NoError(t, err)
    require.False(t, hrw.fileMaxtSet)
    require.NoError(t, hrw.IterateAllChunks(func(_ HeadSeriesRef, _ ChunkDiskMapperRef, _, _ int64, _ uint16, _ chunkenc.Encoding) error {
    require.NoError(t, hrw.IterateAllChunks(func(_ HeadSeriesRef, _ ChunkDiskMapperRef, _, _ int64, _ uint16, _ chunkenc.Encoding, _ bool) error {
        return nil
    }))
    require.True(t, hrw.fileMaxtSet)

@ -566,9 +570,8 @@ func createChunk(t *testing.T, idx int, hrw *ChunkDiskMapper) (seriesRef HeadSer
    awaitCb := make(chan struct{})
    if rand.Intn(2) == 0 {
        isOOO = true
        chunk = &chunkenc.OOOXORChunk{XORChunk: chunk.(*chunkenc.XORChunk)}
    }
    chunkRef = hrw.WriteChunk(seriesRef, mint, maxt, chunk, func(cbErr error) {
    chunkRef = hrw.WriteChunk(seriesRef, mint, maxt, chunk, isOOO, func(cbErr error) {
        require.NoError(t, err)
        close(awaitCb)
    })

@ -583,7 +586,7 @@ func writeUnsupportedChunk(t *testing.T, idx int, hrw *ChunkDiskMapper) (seriesR
    maxt = int64((idx + 1) * 1000)
    chunk = randomUnsupportedChunk(t)
    awaitCb := make(chan struct{})
    chunkRef = hrw.WriteChunk(seriesRef, mint, maxt, chunk, func(cbErr error) {
    chunkRef = hrw.WriteChunk(seriesRef, mint, maxt, chunk, false, func(cbErr error) {
        require.NoError(t, err)
        close(awaitCb)
    })
@ -859,11 +859,13 @@ func open(dir string, l log.Logger, r prometheus.Registerer, opts *Options, rngs
            if err := wbl.Repair(initErr); err != nil {
                return nil, errors.Wrap(err, "repair corrupted OOO WAL")
            }
            level.Info(db.logger).Log("msg", "Successfully repaired OOO WAL")
        } else {
            level.Warn(db.logger).Log("msg", "Encountered WAL read error, attempting repair", "err", initErr)
            if err := wal.Repair(initErr); err != nil {
                return nil, errors.Wrap(err, "repair corrupted WAL")
            }
            level.Info(db.logger).Log("msg", "Successfully repaired WAL")
        }
    }
tsdb/db_test.go

@ -130,7 +130,25 @@ func query(t testing.TB, q storage.Querier, matchers ...*labels.Matcher) map[str
    return result
}

// queryChunks runs a matcher query against the querier and fully expands its data.
// queryAndExpandChunks runs a matcher query against the querier and fully expands its data into samples.
func queryAndExpandChunks(t testing.TB, q storage.ChunkQuerier, matchers ...*labels.Matcher) map[string][][]tsdbutil.Sample {
    s := queryChunks(t, q, matchers...)

    res := make(map[string][][]tsdbutil.Sample)
    for k, v := range s {
        var samples [][]tsdbutil.Sample
        for _, chk := range v {
            sam, err := storage.ExpandSamples(chk.Chunk.Iterator(nil), nil)
            require.NoError(t, err)
            samples = append(samples, sam)
        }
        res[k] = samples
    }

    return res
}

// queryChunks runs a matcher query against the querier and expands its data.
func queryChunks(t testing.TB, q storage.ChunkQuerier, matchers ...*labels.Matcher) map[string][]chunks.Meta {
    ss := q.Select(false, nil, matchers...)
    defer func() {
@ -2367,7 +2385,7 @@ func TestDBReadOnly(t *testing.T) {
        logger    = log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr))
        expBlocks []*Block
        expSeries map[string][]tsdbutil.Sample
        expChunks map[string][]chunks.Meta
        expChunks map[string][][]tsdbutil.Sample
        expDBHash []byte
        matchAll  = labels.MustNewMatcher(labels.MatchEqual, "", "")
        err       error

@ -2418,7 +2436,7 @@ func TestDBReadOnly(t *testing.T) {
        expSeries = query(t, q, matchAll)
        cq, err := dbWritable.ChunkQuerier(context.TODO(), math.MinInt64, math.MaxInt64)
        require.NoError(t, err)
        expChunks = queryChunks(t, cq, matchAll)
        expChunks = queryAndExpandChunks(t, cq, matchAll)

        require.NoError(t, dbWritable.Close()) // Close here to allow getting the dir hash for windows.
        expDBHash = testutil.DirHash(t, dbWritable.Dir())

@ -2452,7 +2470,7 @@ func TestDBReadOnly(t *testing.T) {
    t.Run("chunk querier", func(t *testing.T) {
        cq, err := dbReadOnly.ChunkQuerier(context.TODO(), math.MinInt64, math.MaxInt64)
        require.NoError(t, err)
        readOnlySeries := queryChunks(t, cq, matchAll)
        readOnlySeries := queryAndExpandChunks(t, cq, matchAll)
        readOnlyDBHash := testutil.DirHash(t, dbDir)

        require.Equal(t, len(expChunks), len(readOnlySeries), "total series mismatch")
@ -4202,22 +4220,11 @@ func TestOOOCompaction(t *testing.T) {
    verifySamples(db.Blocks()[1], 120, 239)
    verifySamples(db.Blocks()[2], 240, 310)

    // Because of OOO compaction, the current mmap file will end.
    // All the chunks are only in the first file.
    // Add a dummy mmap chunk to create a new mmap file.
    // There should be a single m-map file.
    mmapDir := mmappedChunksDir(db.head.opts.ChunkDirRoot)
    files, err = os.ReadDir(mmapDir)
    require.NoError(t, err)
    require.Len(t, files, 1)
    waitC := make(chan struct{})
    db.head.chunkDiskMapper.WriteChunk(100, 0, 0, chunkenc.NewXORChunk(), func(err error) {
        require.NoError(t, err)
        close(waitC)
    })
    <-waitC
    files, err = os.ReadDir(mmapDir)
    require.NoError(t, err)
    require.Len(t, files, 2)

    // Compact the in-order head and expect another block.
    // Since this is a forced compaction, this block is not aligned with 2h.

@ -4233,7 +4240,7 @@ func TestOOOCompaction(t *testing.T) {
    files, err = os.ReadDir(mmapDir)
    require.NoError(t, err)
    require.Len(t, files, 1)
    require.Equal(t, "000002", files[0].Name())
    require.Equal(t, "000001", files[0].Name())

    // This will merge overlapping block.
    require.NoError(t, db.Compact())
@ -5088,17 +5095,8 @@ func TestOOOCompactionFailure(t *testing.T) {
    require.Equal(t, len(db.Blocks()), 3)
    require.Equal(t, oldBlocks, db.Blocks())

    // Because of OOO compaction, the current mmap file will end.
    // All the chunks are only in the first file.
    // Add a dummy mmap chunk to create a new mmap file.
    // There should be a single m-map file
    verifyMmapFiles("000001")
    waitC := make(chan struct{})
    db.head.chunkDiskMapper.WriteChunk(100, 0, 0, chunkenc.NewXORChunk(), func(err error) {
        require.NoError(t, err)
        close(waitC)
    })
    <-waitC
    verifyMmapFiles("000001", "000002")

    // All but last WBL file will be deleted.
    // 8 files in total (starting at 0) because of 7 compaction calls.

@ -5141,7 +5139,7 @@ func TestOOOCompactionFailure(t *testing.T) {

    // The compaction also clears out the old m-map files. Including
    // the file that has ooo chunks.
    verifyMmapFiles("000002")
    verifyMmapFiles("000001")
}

func TestWBLCorruption(t *testing.T) {

@@ -6454,3 +6452,76 @@ func compareSeries(t require.TestingT, expected, actual map[string][]tsdbutil.Sa
 		}
 	}
 }
+
+// TestChunkQuerierReadWriteRace looks for any possible race between appending
+// samples and reading chunks because the head chunk that is being appended to
+// can be read in parallel and we should be able to make a copy of the chunk without
+// worrying about the parallel write.
+func TestChunkQuerierReadWriteRace(t *testing.T) {
+	db := openTestDB(t, nil, nil)
+	defer func() {
+		require.NoError(t, db.Close())
+	}()
+
+	lbls := labels.FromStrings("foo", "bar")
+
+	writer := func() error {
+		<-time.After(5 * time.Millisecond) // Initial pause while readers start.
+		ts := 0
+		for i := 0; i < 500; i++ {
+			app := db.Appender(context.Background())
+			for j := 0; j < 10; j++ {
+				ts++
+				_, err := app.Append(0, lbls, int64(ts), float64(ts*100))
+				if err != nil {
+					return err
+				}
+			}
+			err := app.Commit()
+			if err != nil {
+				return err
+			}
+			<-time.After(time.Millisecond)
+		}
+		return nil
+	}
+
+	reader := func() {
+		querier, err := db.ChunkQuerier(context.Background(), math.MinInt64, math.MaxInt64)
+		require.NoError(t, err)
+		defer func(q storage.ChunkQuerier) {
+			require.NoError(t, q.Close())
+		}(querier)
+		ss := querier.Select(false, nil, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"))
+		for ss.Next() {
+			cs := ss.At()
+			it := cs.Iterator(nil)
+			for it.Next() {
+				m := it.At()
+				b := m.Chunk.Bytes()
+				bb := make([]byte, len(b))
+				copy(bb, b) // This copying of chunk bytes detects any race.
+			}
+		}
+		require.NoError(t, ss.Err())
+	}
+
+	ch := make(chan struct{})
+	var writerErr error
+	go func() {
+		defer close(ch)
+		writerErr = writer()
+	}()
+
+Outer:
+	for {
+		reader()
+		select {
+		case <-ch:
+			break Outer
+		default:
+		}
+	}
+
+	require.NoError(t, writerErr)
+}
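The essence of the race this new test hunts for can be reduced to a few lines. The following standalone sketch is illustrative only (not part of the change): copying the bytes of a buffer that another goroutine is appending to is exactly the access pattern the Go race detector flags, which is why the test copies chunk bytes while the writer appends.

	package main

	import "time"

	func main() {
		buf := []byte{1, 2, 3}
		go func() {
			for i := 0; i < 1000; i++ {
				buf = append(buf, byte(i)) // concurrent append, standing in for the head chunk
			}
		}()
		for i := 0; i < 1000; i++ {
			bb := make([]byte, len(buf))
			copy(bb, buf) // under `go run -race` this read is reported as racing with the append
			_ = bb
			time.Sleep(time.Microsecond)
		}
	}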

@@ -67,13 +67,13 @@ var (
 // 0 size queue to queue based chunk disk mapper.
 type chunkDiskMapper interface {
 	CutNewFile() (returnErr error)
-	IterateAllChunks(f func(seriesRef chunks.HeadSeriesRef, chunkRef chunks.ChunkDiskMapperRef, mint, maxt int64, numSamples uint16, encoding chunkenc.Encoding) error) (err error)
+	IterateAllChunks(f func(seriesRef chunks.HeadSeriesRef, chunkRef chunks.ChunkDiskMapperRef, mint, maxt int64, numSamples uint16, encoding chunkenc.Encoding, isOOO bool) error) (err error)
 	Truncate(fileNo uint32) error
 	DeleteCorrupted(originalErr error) error
 	Size() (int64, error)
 	Close() error
 	Chunk(ref chunks.ChunkDiskMapperRef) (chunkenc.Chunk, error)
-	WriteChunk(seriesRef chunks.HeadSeriesRef, mint, maxt int64, chk chunkenc.Chunk, callback func(err error)) (chkRef chunks.ChunkDiskMapperRef)
+	WriteChunk(seriesRef chunks.HeadSeriesRef, mint, maxt int64, chk chunkenc.Chunk, isOOO bool, callback func(err error)) (chkRef chunks.ChunkDiskMapperRef)
 	IsQueueEmpty() bool
 }

@@ -811,10 +811,9 @@ func (h *Head) loadMmappedChunks(refSeries map[chunks.HeadSeriesRef]*memSeries)
 	mmappedChunks := map[chunks.HeadSeriesRef][]*mmappedChunk{}
 	oooMmappedChunks := map[chunks.HeadSeriesRef][]*mmappedChunk{}
 	var lastRef, secondLastRef chunks.ChunkDiskMapperRef
-	if err := h.chunkDiskMapper.IterateAllChunks(func(seriesRef chunks.HeadSeriesRef, chunkRef chunks.ChunkDiskMapperRef, mint, maxt int64, numSamples uint16, encoding chunkenc.Encoding) error {
+	if err := h.chunkDiskMapper.IterateAllChunks(func(seriesRef chunks.HeadSeriesRef, chunkRef chunks.ChunkDiskMapperRef, mint, maxt int64, numSamples uint16, encoding chunkenc.Encoding, isOOO bool) error {
 		secondLastRef = lastRef
 		lastRef = chunkRef
-		isOOO := chunkenc.IsOutOfOrderChunk(encoding)
 		if !isOOO && maxt < h.minValidTime.Load() {
 			return nil
 		}

@@ -1487,8 +1487,7 @@ func (s *memSeries) mmapCurrentOOOHeadChunk(chunkDiskMapper chunkDiskMapper) chu
 		return 0
 	}
 	xor, _ := s.ooo.oooHeadChunk.chunk.ToXOR() // Encode to XorChunk which is more compact and implements all of the needed functionality.
-	oooXor := &chunkenc.OOOXORChunk{XORChunk: xor}
-	chunkRef := chunkDiskMapper.WriteChunk(s.ref, s.ooo.oooHeadChunk.minTime, s.ooo.oooHeadChunk.maxTime, oooXor, handleChunkWriteError)
+	chunkRef := chunkDiskMapper.WriteChunk(s.ref, s.ooo.oooHeadChunk.minTime, s.ooo.oooHeadChunk.maxTime, xor, true, handleChunkWriteError)
 	s.ooo.oooMmappedChunks = append(s.ooo.oooMmappedChunks, &mmappedChunk{
 		ref:        chunkRef,
 		numSamples: uint16(xor.NumSamples()),

@@ -1505,7 +1504,7 @@ func (s *memSeries) mmapCurrentHeadChunk(chunkDiskMapper chunkDiskMapper) {
 		return
 	}
 
-	chunkRef := chunkDiskMapper.WriteChunk(s.ref, s.headChunk.minTime, s.headChunk.maxTime, s.headChunk.chunk, handleChunkWriteError)
+	chunkRef := chunkDiskMapper.WriteChunk(s.ref, s.headChunk.minTime, s.headChunk.maxTime, s.headChunk.chunk, false, handleChunkWriteError)
 	s.mmappedChunks = append(s.mmappedChunks, &mmappedChunk{
 		ref:        chunkRef,
 		numSamples: uint16(s.headChunk.chunk.NumSamples()),

@@ -303,22 +303,36 @@ func (h *headChunkReader) Close() error {
 
 // Chunk returns the chunk for the reference number.
 func (h *headChunkReader) Chunk(meta chunks.Meta) (chunkenc.Chunk, error) {
+	chk, _, err := h.chunk(meta, false)
+	return chk, err
+}
+
+// ChunkWithCopy returns the chunk for the reference number.
+// If the chunk is the in-memory chunk, then it makes a copy and returns the copied chunk.
+func (h *headChunkReader) ChunkWithCopy(meta chunks.Meta) (chunkenc.Chunk, int64, error) {
+	return h.chunk(meta, true)
+}
+
+// chunk returns the chunk for the reference number.
+// If copyLastChunk is true, then it makes a copy of the head chunk if asked for it.
+// Also returns max time of the chunk.
+func (h *headChunkReader) chunk(meta chunks.Meta, copyLastChunk bool) (chunkenc.Chunk, int64, error) {
 	sid, cid := chunks.HeadChunkRef(meta.Ref).Unpack()
 
 	s := h.head.series.getByID(sid)
 	// This means that the series has been garbage collected.
 	if s == nil {
-		return nil, storage.ErrNotFound
+		return nil, 0, storage.ErrNotFound
 	}
 
 	s.Lock()
-	c, garbageCollect, err := s.chunk(cid, h.head.chunkDiskMapper, &h.head.memChunkPool)
+	c, headChunk, err := s.chunk(cid, h.head.chunkDiskMapper, &h.head.memChunkPool)
 	if err != nil {
 		s.Unlock()
-		return nil, err
+		return nil, 0, err
 	}
 	defer func() {
-		if garbageCollect {
+		if !headChunk {
 			// Set this to nil so that Go GC can collect it after it has been used.
 			c.chunk = nil
 			h.head.memChunkPool.Put(c)

@@ -328,22 +342,36 @@ func (h *headChunkReader) Chunk(meta chunks.Meta) (chunkenc.Chunk, error) {
 	// This means that the chunk is outside the specified range.
 	if !c.OverlapsClosedInterval(h.mint, h.maxt) {
 		s.Unlock()
-		return nil, storage.ErrNotFound
+		return nil, 0, storage.ErrNotFound
 	}
 
+	chk, maxTime := c.chunk, c.maxTime
+	if headChunk && copyLastChunk {
+		// The caller may ask to copy the head chunk in order to take the
+		// bytes of the chunk without causing the race between read and append.
+		b := s.headChunk.chunk.Bytes()
+		newB := make([]byte, len(b))
+		copy(newB, b) // TODO(codesome): Use bytes.Clone() when we upgrade to Go 1.20.
+		// TODO(codesome): Put back in the pool (non-trivial).
+		chk, err = h.head.opts.ChunkPool.Get(s.headChunk.chunk.Encoding(), newB)
+		if err != nil {
+			return nil, 0, err
+		}
+	}
 	s.Unlock()
 
 	return &safeChunk{
-		Chunk:    c.chunk,
+		Chunk:    chk,
 		s:        s,
 		cid:      cid,
 		isoState: h.isoState,
-	}, nil
+	}, maxTime, nil
 }
 
 // chunk returns the chunk for the HeadChunkID from memory or by m-mapping it from the disk.
-// If garbageCollect is true, it means that the returned *memChunk
+// If headChunk is true, it means that the returned *memChunk
 // (and not the chunkenc.Chunk inside it) can be garbage collected after its usage.
-func (s *memSeries) chunk(id chunks.HeadChunkID, cdm chunkDiskMapper, memChunkPool *sync.Pool) (chunk *memChunk, garbageCollect bool, err error) {
+func (s *memSeries) chunk(id chunks.HeadChunkID, cdm chunkDiskMapper, memChunkPool *sync.Pool) (chunk *memChunk, headChunk bool, err error) {
 	// ix represents the index of chunk in the s.mmappedChunks slice. The chunk id's are
 	// incremented by 1 when new chunk is created, hence (id - firstChunkID) gives the slice index.
 	// The max index for the s.mmappedChunks slice can be len(s.mmappedChunks)-1, hence if the ix
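The TODO above can be retired once the toolchain is on Go 1.20, where the standard library provides bytes.Clone. A minimal sketch of the equivalence (illustrative only, not part of this change):

	package main

	import (
		"bytes"
		"fmt"
	)

	func main() {
		b := []byte("head chunk bytes")

		// What the code above does today:
		newB := make([]byte, len(b))
		copy(newB, b)

		// The Go 1.20 one-liner the TODO refers to:
		cloned := bytes.Clone(b)

		cloned[0] = 'H'
		fmt.Println(string(b), string(newB), string(cloned)) // the copies are independent
	}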

@@ -352,11 +380,12 @@ func (s *memSeries) chunk(id chunks.HeadChunkID, cdm chunkDiskMapper, memChunkPo
 	if ix < 0 || ix > len(s.mmappedChunks) {
 		return nil, false, storage.ErrNotFound
 	}
 
 	if ix == len(s.mmappedChunks) {
 		if s.headChunk == nil {
 			return nil, false, errors.New("invalid head chunk")
 		}
-		return s.headChunk, false, nil
+		return s.headChunk, true, nil
 	}
 	chk, err := cdm.Chunk(s.mmappedChunks[ix].ref)
 	if err != nil {

@@ -369,7 +398,7 @@ func (s *memSeries) chunk(id chunks.HeadChunkID, cdm chunkDiskMapper, memChunkPo
 	mc.chunk = chk
 	mc.minTime = s.mmappedChunks[ix].minTime
 	mc.maxTime = s.mmappedChunks[ix].maxTime
-	return mc, true, nil
+	return mc, false, nil
 }
 
 // oooMergedChunk returns the requested chunk based on the given chunks.Meta

@@ -69,7 +69,7 @@ func newTestHead(t testing.TB, chunkRange int64, compressWAL, oooEnabled bool) (
 	h, err := NewHead(nil, nil, wal, nil, opts, nil)
 	require.NoError(t, err)
 
-	require.NoError(t, h.chunkDiskMapper.IterateAllChunks(func(_ chunks.HeadSeriesRef, _ chunks.ChunkDiskMapperRef, _, _ int64, _ uint16, _ chunkenc.Encoding) error {
+	require.NoError(t, h.chunkDiskMapper.IterateAllChunks(func(_ chunks.HeadSeriesRef, _ chunks.ChunkDiskMapperRef, _, _ int64, _ uint16, _ chunkenc.Encoding, _ bool) error {
 		return nil
 	}))

@@ -4247,7 +4247,7 @@ func TestHeadInit_DiscardChunksWithUnsupportedEncoding(t *testing.T) {
 
 	uc := newUnsupportedChunk()
 	// Make this chunk not overlap with the previous and the next
-	h.chunkDiskMapper.WriteChunk(chunks.HeadSeriesRef(seriesRef), 500, 600, uc, func(err error) { require.NoError(t, err) })
+	h.chunkDiskMapper.WriteChunk(chunks.HeadSeriesRef(seriesRef), 500, 600, uc, false, func(err error) { require.NoError(t, err) })
 
 	app = h.Appender(ctx)
 	for i := 700; i < 1200; i++ {

@@ -533,7 +533,11 @@ func (p *populateWithDelGenericSeriesIterator) reset(blockID ulid.ULID, cr Chunk
 	p.currChkMeta = chunks.Meta{}
 }
 
-func (p *populateWithDelGenericSeriesIterator) next() bool {
+// If copyHeadChunk is true, then the head chunk (i.e. the in-memory chunk of the TSDB)
+// is deep copied to avoid races between reads and copying chunk bytes.
+// However, if the deletion intervals overlaps with the head chunk, then the head chunk is
+// not copied irrespective of copyHeadChunk because it will be re-encoded later anyway.
+func (p *populateWithDelGenericSeriesIterator) next(copyHeadChunk bool) bool {
 	if p.err != nil || p.i >= len(p.chks)-1 {
 		return false
 	}

@@ -541,12 +545,6 @@ func (p *populateWithDelGenericSeriesIterator) next() bool {
 	p.i++
 	p.currChkMeta = p.chks[p.i]
 
-	p.currChkMeta.Chunk, p.err = p.chunks.Chunk(p.currChkMeta)
-	if p.err != nil {
-		p.err = errors.Wrapf(p.err, "cannot populate chunk %d from block %s", p.currChkMeta.Ref, p.blockID.String())
-		return false
-	}
-
 	p.bufIter.Intervals = p.bufIter.Intervals[:0]
 	for _, interval := range p.intervals {
 		if p.currChkMeta.OverlapsClosedInterval(interval.Mint, interval.Maxt) {

@@ -554,22 +552,28 @@ func (p *populateWithDelGenericSeriesIterator) next() bool {
 		}
 	}
 
-	// Re-encode head chunks that are still open (being appended to) or
-	// outside the compacted MaxTime range.
-	// The chunk.Bytes() method is not safe for open chunks hence the re-encoding.
-	// This happens when snapshotting the head block or just fetching chunks from TSDB.
-	//
-	// TODO(codesome): think how to avoid the typecasting to verify when it is head block.
-	_, isSafeChunk := p.currChkMeta.Chunk.(*safeChunk)
-	if len(p.bufIter.Intervals) == 0 && !(isSafeChunk && p.currChkMeta.MaxTime == math.MaxInt64) {
-		// If there is no overlap with deletion intervals AND it's NOT
-		// an "open" head chunk, we can take chunk as it is.
+	hcr, ok := p.chunks.(*headChunkReader)
+	if ok && copyHeadChunk && len(p.bufIter.Intervals) == 0 {
+		// ChunkWithCopy will copy the head chunk.
+		var maxt int64
+		p.currChkMeta.Chunk, maxt, p.err = hcr.ChunkWithCopy(p.currChkMeta)
+		// For the in-memory head chunk the index reader sets maxt as MaxInt64. We fix it here.
+		p.currChkMeta.MaxTime = maxt
+	} else {
+		p.currChkMeta.Chunk, p.err = p.chunks.Chunk(p.currChkMeta)
+	}
+	if p.err != nil {
+		p.err = errors.Wrapf(p.err, "cannot populate chunk %d from block %s", p.currChkMeta.Ref, p.blockID.String())
+		return false
+	}
+
+	if len(p.bufIter.Intervals) == 0 {
+		// If there is no overlap with deletion intervals, we can take chunk as it is.
 		p.currDelIter = nil
 		return true
 	}
 
-	// We don't want the full chunk, or it's potentially still opened, take
-	// just a part of it.
+	// We don't want the full chunk, take just a part of it.
 	p.bufIter.Iter = p.currChkMeta.Chunk.Iterator(p.bufIter.Iter)
 	p.currDelIter = &p.bufIter
 	return true

@@ -626,7 +630,7 @@ func (p *populateWithDelSeriesIterator) Next() chunkenc.ValueType {
 		}
 	}
 
-	for p.next() {
+	for p.next(false) {
 		if p.currDelIter != nil {
 			p.curr = p.currDelIter
 		} else {

@@ -691,7 +695,7 @@ func (p *populateWithDelChunkSeriesIterator) reset(blockID ulid.ULID, cr ChunkRe
 }
 
 func (p *populateWithDelChunkSeriesIterator) Next() bool {
-	if !p.next() {
+	if !p.next(true) {
 		return false
 	}
 	p.curr = p.currChkMeta

@@ -235,7 +235,19 @@ func testBlockQuerier(t *testing.T, c blockQuerierTestCase, ir IndexReader, cr C
 			chksRes, errRes := storage.ExpandChunks(sres.Iterator(nil))
 			rmChunkRefs(chksRes)
 			require.Equal(t, errExp, errRes)
-			require.Equal(t, chksExp, chksRes)
+
+			require.Equal(t, len(chksExp), len(chksRes))
+			var exp, act [][]tsdbutil.Sample
+			for i := range chksExp {
+				samples, err := storage.ExpandSamples(chksExp[i].Chunk.Iterator(nil), nil)
+				require.NoError(t, err)
+				exp = append(exp, samples)
+				samples, err = storage.ExpandSamples(chksRes[i].Chunk.Iterator(nil), nil)
+				require.NoError(t, err)
+				act = append(act, samples)
+			}
+
+			require.Equal(t, exp, act)
 		}
 		require.NoError(t, res.Err())
 	})

@@ -2273,3 +2285,93 @@ func TestBlockBaseSeriesSet(t *testing.T) {
 		require.NoError(t, bcs.Err())
 	}
 }
+
+func BenchmarkHeadChunkQuerier(b *testing.B) {
+	db := openTestDB(b, nil, nil)
+	defer func() {
+		require.NoError(b, db.Close())
+	}()
+
+	// 3h of data.
+	numTimeseries := 100
+	app := db.Appender(context.Background())
+	for i := 0; i < 120*6; i++ {
+		for j := 0; j < numTimeseries; j++ {
+			lbls := labels.FromStrings("foo", fmt.Sprintf("bar%d", j))
+			if i%10 == 0 {
+				require.NoError(b, app.Commit())
+				app = db.Appender(context.Background())
+			}
+			_, err := app.Append(0, lbls, int64(i*15)*time.Second.Milliseconds(), float64(i*100))
+			require.NoError(b, err)
+		}
+	}
+	require.NoError(b, app.Commit())
+
+	querier, err := db.ChunkQuerier(context.Background(), math.MinInt64, math.MaxInt64)
+	require.NoError(b, err)
+	defer func(q storage.ChunkQuerier) {
+		require.NoError(b, q.Close())
+	}(querier)
+	b.ReportAllocs()
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		ss := querier.Select(false, nil, labels.MustNewMatcher(labels.MatchRegexp, "foo", "bar.*"))
+		total := 0
+		for ss.Next() {
+			cs := ss.At()
+			it := cs.Iterator(nil)
+			for it.Next() {
+				m := it.At()
+				total += m.Chunk.NumSamples()
+			}
+		}
+		_ = total
+		require.NoError(b, ss.Err())
+	}
+}
+
+func BenchmarkHeadQuerier(b *testing.B) {
+	db := openTestDB(b, nil, nil)
+	defer func() {
+		require.NoError(b, db.Close())
+	}()
+
+	// 3h of data.
+	numTimeseries := 100
+	app := db.Appender(context.Background())
+	for i := 0; i < 120*6; i++ {
+		for j := 0; j < numTimeseries; j++ {
+			lbls := labels.FromStrings("foo", fmt.Sprintf("bar%d", j))
+			if i%10 == 0 {
+				require.NoError(b, app.Commit())
+				app = db.Appender(context.Background())
+			}
+			_, err := app.Append(0, lbls, int64(i*15)*time.Second.Milliseconds(), float64(i*100))
+			require.NoError(b, err)
+		}
+	}
+	require.NoError(b, app.Commit())
+
+	querier, err := db.Querier(context.Background(), math.MinInt64, math.MaxInt64)
+	require.NoError(b, err)
+	defer func(q storage.Querier) {
+		require.NoError(b, q.Close())
+	}(querier)
+	b.ReportAllocs()
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		ss := querier.Select(false, nil, labels.MustNewMatcher(labels.MatchRegexp, "foo", "bar.*"))
+		total := int64(0)
+		for ss.Next() {
+			cs := ss.At()
+			it := cs.Iterator(nil)
+			for it.Next() != chunkenc.ValNone {
+				ts, _ := it.At()
+				total += ts
+			}
+		}
+		_ = total
+		require.NoError(b, ss.Err())
+	}
+}

@@ -0,0 +1 @@
+make: Nothing to be done for `test'.

util/documentcli/documentcli.go (new file, 252 lines)

@@ -0,0 +1,252 @@
// Copyright 2023 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// If we decide to employ this auto generation of markdown documentation for
// amtool and alertmanager, this package could potentially be moved to
// prometheus/common. However, it is crucial to note that this functionality is
// tailored specifically to the way in which the Prometheus documentation is
// rendered, and should be avoided for use by third-party users.

package documentcli

import (
	"bytes"
	"fmt"
	"io"
	"strings"

	"github.com/alecthomas/kingpin/v2"
)

// GenerateMarkdown generates the markdown documentation for an application from
// its kingpin ApplicationModel.
func GenerateMarkdown(model *kingpin.ApplicationModel, writer io.Writer) error {
	h := header(model.Name, model.Help)
	if _, err := writer.Write(h); err != nil {
		return err
	}

	if err := writeFlagTable(writer, 0, model.FlagGroupModel); err != nil {
		return err
	}

	if err := writeArgTable(writer, 0, model.ArgGroupModel); err != nil {
		return err
	}

	if err := writeCmdTable(writer, model.CmdGroupModel); err != nil {
		return err
	}

	return writeSubcommands(writer, 1, model.Name, model.CmdGroupModel.Commands)
}

func header(title, help string) []byte {
	return []byte(fmt.Sprintf(`---
title: %s
---

# %s

%s

`, title, title, help))
}

func createFlagRow(flag *kingpin.FlagModel) []string {
	defaultVal := ""
	if len(flag.Default) > 0 && len(flag.Default[0]) > 0 {
		defaultVal = fmt.Sprintf("`%s`", flag.Default[0])
	}

	name := fmt.Sprintf(`<code class="text-nowrap">--%s</code>`, flag.Name)
	if flag.Short != '\x00' {
		name = fmt.Sprintf(`<code class="text-nowrap">-%c</code>, <code class="text-nowrap">--%s</code>`, flag.Short, flag.Name)
	}

	return []string{name, flag.Help, defaultVal}
}

func writeFlagTable(writer io.Writer, level int, fgm *kingpin.FlagGroupModel) error {
	if fgm == nil || len(fgm.Flags) == 0 {
		return nil
	}

	rows := [][]string{
		{"Flag", "Description", "Default"},
	}

	for _, flag := range fgm.Flags {
		if !flag.Hidden {
			row := createFlagRow(flag)
			rows = append(rows, row)
		}
	}

	return writeTable(writer, rows, fmt.Sprintf("%s Flags", strings.Repeat("#", level+2)))
}

func createArgRow(arg *kingpin.ArgModel) []string {
	defaultVal := ""
	if len(arg.Default) > 0 {
		defaultVal = fmt.Sprintf("`%s`", arg.Default[0])
	}

	required := ""
	if arg.Required {
		required = "Yes"
	}

	return []string{arg.Name, arg.Help, defaultVal, required}
}

func writeArgTable(writer io.Writer, level int, agm *kingpin.ArgGroupModel) error {
	if agm == nil || len(agm.Args) == 0 {
		return nil
	}

	rows := [][]string{
		{"Argument", "Description", "Default", "Required"},
	}

	for _, arg := range agm.Args {
		row := createArgRow(arg)
		rows = append(rows, row)
	}

	return writeTable(writer, rows, fmt.Sprintf("%s Arguments", strings.Repeat("#", level+2)))
}

func createCmdRow(cmd *kingpin.CmdModel) []string {
	if cmd.Hidden {
		return nil
	}
	return []string{cmd.FullCommand, cmd.Help}
}

func writeCmdTable(writer io.Writer, cgm *kingpin.CmdGroupModel) error {
	if cgm == nil || len(cgm.Commands) == 0 {
		return nil
	}

	rows := [][]string{
		{"Command", "Description"},
	}

	for _, cmd := range cgm.Commands {
		row := createCmdRow(cmd)
		if row != nil {
			rows = append(rows, row)
		}
	}

	return writeTable(writer, rows, "## Commands")
}

func writeTable(writer io.Writer, data [][]string, header string) error {
	if len(data) < 2 {
		return nil
	}

	buf := bytes.NewBuffer(nil)

	buf.WriteString(fmt.Sprintf("\n\n%s\n\n", header))
	columnsToRender := determineColumnsToRender(data)

	headers := data[0]
	buf.WriteString("|")
	for _, j := range columnsToRender {
		buf.WriteString(fmt.Sprintf(" %s |", headers[j]))
	}
	buf.WriteString("\n")

	buf.WriteString("|")
	for range columnsToRender {
		buf.WriteString(" --- |")
	}
	buf.WriteString("\n")

	for i := 1; i < len(data); i++ {
		row := data[i]
		buf.WriteString("|")
		for _, j := range columnsToRender {
			buf.WriteString(fmt.Sprintf(" %s |", row[j]))
		}
		buf.WriteString("\n")
	}

	if _, err := writer.Write(buf.Bytes()); err != nil {
		return err
	}

	if _, err := writer.Write([]byte("\n\n")); err != nil {
		return err
	}

	return nil
}

func determineColumnsToRender(data [][]string) []int {
	columnsToRender := []int{}
	if len(data) == 0 {
		return columnsToRender
	}
	for j := 0; j < len(data[0]); j++ {
		renderColumn := false
		for i := 1; i < len(data); i++ {
			if data[i][j] != "" {
				renderColumn = true
				break
			}
		}
		if renderColumn {
			columnsToRender = append(columnsToRender, j)
		}
	}
	return columnsToRender
}

func writeSubcommands(writer io.Writer, level int, modelName string, commands []*kingpin.CmdModel) error {
	level++
	if level > 4 {
		level = 4
	}
	for _, cmd := range commands {
		if cmd.Hidden {
			continue
		}

		help := cmd.Help
		if cmd.HelpLong != "" {
			help = cmd.HelpLong
		}
		if _, err := writer.Write([]byte(fmt.Sprintf("\n\n%s `%s %s`\n\n%s\n\n", strings.Repeat("#", level+1), modelName, cmd.FullCommand, help))); err != nil {
			return err
		}

		if err := writeFlagTable(writer, level, cmd.FlagGroupModel); err != nil {
			return err
		}

		if err := writeArgTable(writer, level, cmd.ArgGroupModel); err != nil {
			return err
		}

		if cmd.CmdGroupModel != nil && len(cmd.CmdGroupModel.Commands) > 0 {
			if err := writeSubcommands(writer, level+1, modelName, cmd.CmdGroupModel.Commands); err != nil {
				return err
			}
		}
	}
	return nil
}
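A minimal sketch of how this package might be driven. The application, flag, and command names below are made up for illustration; Model() is kingpin's accessor for the application model that GenerateMarkdown consumes.

	package main

	import (
		"log"
		"os"

		"github.com/alecthomas/kingpin/v2"

		"github.com/prometheus/prometheus/util/documentcli"
	)

	func main() {
		// Hypothetical application; any kingpin app exposes a Model().
		app := kingpin.New("exampletool", "An example CLI to document.")
		app.Flag("verbose", "Enable verbose output.").Short('v').Bool()
		app.Command("check", "Check resources for validity.")

		// Writes the front-matter header plus the Flags/Arguments/Commands tables.
		if err := documentcli.GenerateMarkdown(app.Model(), os.Stdout); err != nil {
			log.Fatal(err)
		}
	}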

@@ -1502,7 +1502,7 @@ func (api *API) remoteWrite(w http.ResponseWriter, r *http.Request) {
 	if api.remoteWriteHandler != nil {
 		api.remoteWriteHandler.ServeHTTP(w, r)
 	} else {
-		http.Error(w, "remote write receiver needs to be enabled with --enable-feature=remote-write-receiver", http.StatusNotFound)
+		http.Error(w, "remote write receiver needs to be enabled with --web.enable-remote-write-receiver", http.StatusNotFound)
 	}
 }

@@ -1,6 +1,6 @@
 {
   "name": "@prometheus-io/codemirror-promql",
-  "version": "0.43.0-rc.0",
+  "version": "0.43.0",
   "description": "a CodeMirror mode for the PromQL language",
   "types": "dist/esm/index.d.ts",
   "module": "dist/esm/index.js",

@@ -29,7 +29,7 @@
   },
   "homepage": "https://github.com/prometheus/prometheus/blob/main/web/ui/module/codemirror-promql/README.md",
   "dependencies": {
-    "@prometheus-io/lezer-promql": "0.43.0-rc.0",
+    "@prometheus-io/lezer-promql": "0.43.0",
     "lru-cache": "^6.0.0"
   },
   "devDependencies": {

@@ -1,6 +1,6 @@
 {
   "name": "@prometheus-io/lezer-promql",
-  "version": "0.43.0-rc.0",
+  "version": "0.43.0",
   "description": "lezer-based PromQL grammar",
   "main": "dist/index.cjs",
   "type": "module",

web/ui/package-lock.json (generated, 14 lines changed)

@@ -28,10 +28,10 @@
     },
     "module/codemirror-promql": {
       "name": "@prometheus-io/codemirror-promql",
-      "version": "0.43.0-rc.0",
+      "version": "0.43.0",
       "license": "Apache-2.0",
       "dependencies": {
-        "@prometheus-io/lezer-promql": "0.43.0-rc.0",
+        "@prometheus-io/lezer-promql": "0.43.0",
         "lru-cache": "^6.0.0"
       },
       "devDependencies": {

@@ -61,7 +61,7 @@
     },
     "module/lezer-promql": {
       "name": "@prometheus-io/lezer-promql",
-      "version": "0.43.0-rc.0",
+      "version": "0.43.0",
       "license": "Apache-2.0",
       "devDependencies": {
         "@lezer/generator": "^1.2.2",

@@ -20763,7 +20763,7 @@
     },
     "react-app": {
       "name": "@prometheus-io/app",
-      "version": "0.43.0-rc.0",
+      "version": "0.43.0",
       "dependencies": {
         "@codemirror/autocomplete": "^6.4.0",
         "@codemirror/commands": "^6.2.0",

@@ -20781,7 +20781,7 @@
         "@lezer/lr": "^1.3.1",
         "@nexucis/fuzzy": "^0.4.1",
         "@nexucis/kvsearch": "^0.8.1",
-        "@prometheus-io/codemirror-promql": "0.43.0-rc.0",
+        "@prometheus-io/codemirror-promql": "0.43.0",
         "bootstrap": "^4.6.2",
         "css.escape": "^1.5.1",
         "downshift": "^7.2.0",

@@ -23417,7 +23417,7 @@
         "@lezer/lr": "^1.3.1",
         "@nexucis/fuzzy": "^0.4.1",
         "@nexucis/kvsearch": "^0.8.1",
-        "@prometheus-io/codemirror-promql": "0.43.0-rc.0",
+        "@prometheus-io/codemirror-promql": "0.43.0",
         "@testing-library/react-hooks": "^7.0.2",
         "@types/enzyme": "^3.10.12",
         "@types/flot": "0.0.32",

@@ -23468,7 +23468,7 @@
         "@lezer/common": "^1.0.2",
         "@lezer/highlight": "^1.1.3",
         "@lezer/lr": "^1.3.1",
-        "@prometheus-io/lezer-promql": "0.43.0-rc.0",
+        "@prometheus-io/lezer-promql": "0.43.0",
         "@types/lru-cache": "^5.1.1",
         "isomorphic-fetch": "^3.0.0",
         "lru-cache": "^6.0.0",

@@ -1,6 +1,6 @@
 {
   "name": "@prometheus-io/app",
-  "version": "0.43.0-rc.0",
+  "version": "0.43.0",
   "private": true,
   "dependencies": {
     "@codemirror/autocomplete": "^6.4.0",

@@ -19,7 +19,7 @@
     "@lezer/common": "^1.0.2",
     "@nexucis/fuzzy": "^0.4.1",
     "@nexucis/kvsearch": "^0.8.1",
-    "@prometheus-io/codemirror-promql": "0.43.0-rc.0",
+    "@prometheus-io/codemirror-promql": "0.43.0",
     "bootstrap": "^4.6.2",
     "css.escape": "^1.5.1",
     "downshift": "^7.2.0",