Mirror of https://github.com/prometheus/prometheus.git (synced 2025-01-13 06:47:28 -08:00)

commit eaad7c0fc8

Merge branch 'main' into api-response-format-extension-point

Signed-off-by: Charles Korn <charleskorn@users.noreply.github.com>
.github/workflows/buf-lint.yml (6 changes)

@@ -1,5 +1,5 @@
 name: buf.build
-on: # yamllint disable-line rule:truthy
+on:
   pull_request:
     paths:
       - ".github/workflows/buf-lint.yml"
@@ -10,7 +10,9 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v3
-      - uses: bufbuild/buf-setup-action@v1.9.0
+      - uses: bufbuild/buf-setup-action@v1.13.1
+        with:
+          github_token: ${{ secrets.GITHUB_TOKEN }}
       - uses: bufbuild/buf-lint-action@v1
         with:
          input: 'prompb'

.github/workflows/buf.yml (6 changes)

@@ -1,5 +1,5 @@
 name: buf.build
-on: # yamllint disable-line rule:truthy
+on:
   push:
     branches:
       - main
@@ -9,7 +9,9 @@ jobs:
     runs-on: ubuntu-latest
     steps:
      - uses: actions/checkout@v3
-      - uses: bufbuild/buf-setup-action@v1.9.0
+      - uses: bufbuild/buf-setup-action@v1.13.1
+        with:
+          github_token: ${{ secrets.GITHUB_TOKEN }}
      - uses: bufbuild/buf-lint-action@v1
        with:
          input: 'prompb'

.github/workflows/ci.yml (6 changes)

@@ -1,6 +1,6 @@
 ---
 name: CI
-on: # yamllint disable-line rule:truthy
+on:
   pull_request:
   push:
 jobs:
@@ -145,7 +145,7 @@ jobs:
         run: sudo apt-get update && sudo apt-get -y install libsnmp-dev
         if: github.repository == 'prometheus/snmp_exporter'
       - name: Lint
-        uses: golangci/golangci-lint-action@v3.3.1
+        uses: golangci/golangci-lint-action@v3.4.0
         with:
           version: v1.50.1
   fuzzing:
@@ -196,7 +196,7 @@ jobs:
         with:
           node-version-file: "web/ui/.nvmrc"
           registry-url: "https://registry.npmjs.org"
-      - uses: actions/cache@v3.0.11
+      - uses: actions/cache@v3.2.4
        with:
          path: ~/.npm
          key: ${{ runner.os }}-node-${{ hashFiles('**/package-lock.json') }}

.github/workflows/codeql-analysis.yml (2 changes)

@@ -1,7 +1,7 @@
 ---
 name: "CodeQL"

-on: # yamllint disable-line rule:truthy
+on:
   workflow_call:
   schedule:
     - cron: "26 14 * * 1"

.github/workflows/funcbench.yml (2 changes)

@@ -1,4 +1,4 @@
-on: # yamllint disable-line rule:truthy
+on:
   repository_dispatch:
     types: [funcbench_start]
 name: Funcbench Workflow

.github/workflows/fuzzing.yml (2 changes)

@@ -1,5 +1,5 @@
 name: CIFuzz
-on: # yamllint disable-line rule:truthy
+on:
   workflow_call:
 jobs:
   Fuzzing:

.github/workflows/lock.yml (4 changes)

@@ -1,6 +1,6 @@
 name: 'Lock Threads'

-on: # yamllint disable-line rule:truthy
+on:
   schedule:
     - cron: '13 23 * * *'
   workflow_dispatch:
@@ -15,7 +15,7 @@ jobs:
   action:
     runs-on: ubuntu-latest
     steps:
-      - uses: dessant/lock-threads@v3
+      - uses: dessant/lock-threads@v4
        with:
          process-only: 'issues'
          issue-inactive-days: '180'

.github/workflows/prombench.yml (2 changes)

@@ -1,4 +1,4 @@
-on: # yamllint disable-line rule:truthy
+on:
   repository_dispatch:
     types: [prombench_start, prombench_restart, prombench_stop]
 name: Prombench Workflow

.github/workflows/repo_sync.yml (2 changes)

@@ -1,6 +1,6 @@
 ---
 name: Sync repo files
-on: # yamllint disable-line rule:truthy
+on:
   schedule:
     - cron: '44 17 * * *'
 jobs:

CHANGELOG.md (26 changes)

@@ -1,6 +1,28 @@
 # Changelog

-## 2.41.0-rc.0 / 2022-12-14
+## 2.42.0 / 2023-01-31
+
+This release comes with a bunch of feature coverage for native histograms and breaking changes.
+
+If you are trying native histograms already, we recommend you remove the `wal` directory when upgrading.
+Because the old WAL record for native histograms is not backward compatible in v2.42.0, this will lead to some data loss for the latest data.
+
+Additionally, if you scrape "float histograms" or use recording rules on native histograms in v2.42.0 (which writes float histograms),
+it is a one-way street since older versions do not support float histograms.
+
+* [CHANGE] **breaking** TSDB: Changed WAL record format for the experimental native histograms. #11783
+* [FEATURE] Add 'keep_firing_for' field to alerting rules. #11827
+* [FEATURE] Promtool: Add support of selecting timeseries for TSDB dump. #11872
+* [ENHANCEMENT] Agent: Native histogram support. #11842
+* [ENHANCEMENT] Rules: Support native histograms in recording rules. #11838
+* [ENHANCEMENT] SD: Add container ID as a meta label for pod targets for Kubernetes. #11844
+* [ENHANCEMENT] SD: Add VM size label to azure service discovery. #11650
+* [ENHANCEMENT] Support native histograms in federation. #11830
+* [ENHANCEMENT] TSDB: Add gauge histogram support. #11783 #11840 #11814
+* [ENHANCEMENT] TSDB/Scrape: Support FloatHistogram that represents buckets as float64 values. #11522 #11817 #11716
+* [ENHANCEMENT] UI: Show individual scrape pools on /targets page. #11142
+
+## 2.41.0 / 2022-12-20

 * [FEATURE] Relabeling: Add `keepequal` and `dropequal` relabel actions. #11564
 * [FEATURE] Add support for HTTP proxy headers. #11712
@@ -61,7 +83,7 @@ Your existing histograms won't switch to native histograms until `NativeHistogra
 * [ENHANCEMENT] Kubernetes SD: Use protobuf encoding. #11353
 * [ENHANCEMENT] TSDB: Use golang.org/x/exp/slices for improved sorting speed. #11054 #11318 #11380
+* [ENHANCEMENT] Consul SD: Add enterprise admin partitions. Adds `__meta_consul_partition` label. Adds `partition` config in `consul_sd_config`. #11482
 * [BUGFIX] API: Fix API error codes for `/api/v1/labels` and `/api/v1/series`. #11356
-* [BUGFIX] API: Fix API error codes for `/api/v1/labels` and `/api/v1/series`. #11356

 ## 2.39.2 / 2022-11-09

@@ -46,7 +46,9 @@ Release cadence of first pre-releases being cut is 6 weeks.
 | v2.39 | 2022-09-21 | Ganesh Vernekar (GitHub: @codesome) |
 | v2.40 | 2022-11-02 | Ganesh Vernekar (GitHub: @codesome) |
 | v2.41 | 2022-12-14 | Julien Pivotto (GitHub: @roidelapluie) |
-| v2.42 | 2023-01-25 | **searching for volunteer** |
+| v2.42 | 2023-01-25 | Kemal Akkoyun (GitHub: @kakkoyun) |
+| v2.43 | 2023-03-08 | **searching for volunteer** |
+| v2.44 | 2023-04-19 | **searching for volunteer** |

 If you are interested in volunteering please create a pull request against the [prometheus/prometheus](https://github.com/prometheus/prometheus) repository and propose yourself for the release series of your choice.

@@ -45,6 +45,7 @@ import (
     "gopkg.in/yaml.v2"

     dto "github.com/prometheus/client_model/go"
+    promconfig "github.com/prometheus/common/config"
     "github.com/prometheus/common/expfmt"

     "github.com/prometheus/prometheus/config"
@@ -74,6 +75,12 @@ const (
 var lintOptions = []string{lintOptionAll, lintOptionDuplicateRules, lintOptionNone}

 func main() {
+    var (
+        httpRoundTripper   = api.DefaultRoundTripper
+        serverURL          *url.URL
+        httpConfigFilePath string
+    )
+
     app := kingpin.New(filepath.Base(os.Args[0]), "Tooling for the Prometheus monitoring system.").UsageWriter(os.Stdout)
     app.Version(version.Print("promtool"))
     app.HelpFlag.Short('h')
@@ -124,14 +131,15 @@ func main() {

     queryCmd := app.Command("query", "Run query against a Prometheus server.")
     queryCmdFmt := queryCmd.Flag("format", "Output format of the query.").Short('o').Default("promql").Enum("promql", "json")
+    queryCmd.Flag("http.config.file", "HTTP client configuration file for promtool to connect to Prometheus.").PlaceHolder("<filename>").ExistingFileVar(&httpConfigFilePath)

     queryInstantCmd := queryCmd.Command("instant", "Run instant query.")
-    queryInstantServer := queryInstantCmd.Arg("server", "Prometheus server to query.").Required().URL()
+    queryInstantCmd.Arg("server", "Prometheus server to query.").Required().URLVar(&serverURL)
     queryInstantExpr := queryInstantCmd.Arg("expr", "PromQL query expression.").Required().String()
     queryInstantTime := queryInstantCmd.Flag("time", "Query evaluation time (RFC3339 or Unix timestamp).").String()

     queryRangeCmd := queryCmd.Command("range", "Run range query.")
-    queryRangeServer := queryRangeCmd.Arg("server", "Prometheus server to query.").Required().URL()
+    queryRangeCmd.Arg("server", "Prometheus server to query.").Required().URLVar(&serverURL)
     queryRangeExpr := queryRangeCmd.Arg("expr", "PromQL query expression.").Required().String()
     queryRangeHeaders := queryRangeCmd.Flag("header", "Extra headers to send to server.").StringMap()
     queryRangeBegin := queryRangeCmd.Flag("start", "Query range start time (RFC3339 or Unix timestamp).").String()
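The hunk above replaces the per-subcommand `URL()` return values with a single shared `serverURL` filled via kingpin's `URLVar`, so all query subcommands write their server argument into one variable. A minimal sketch of that pattern, with hypothetical command and argument names chosen only for illustration:

```go
package main

import (
	"fmt"
	"net/url"
	"os"

	"gopkg.in/alecthomas/kingpin.v2"
)

func main() {
	var serverURL *url.URL // shared across subcommands, filled by URLVar

	app := kingpin.New("example", "Sketch of sharing one server URL between subcommands.")
	queryCmd := app.Command("query", "Run a query.")

	instantCmd := queryCmd.Command("instant", "Run instant query.")
	instantCmd.Arg("server", "Server to query.").Required().URLVar(&serverURL)

	rangeCmd := queryCmd.Command("range", "Run range query.")
	rangeCmd.Arg("server", "Server to query.").Required().URLVar(&serverURL)

	switch kingpin.MustParse(app.Parse(os.Args[1:])) {
	case instantCmd.FullCommand(), rangeCmd.FullCommand():
		// Whichever subcommand ran, serverURL now holds its parsed server argument.
		fmt.Println("server:", serverURL.String())
	}
}
```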
@@ -139,7 +147,7 @@ func main() {
     queryRangeStep := queryRangeCmd.Flag("step", "Query step size (duration).").Duration()

     querySeriesCmd := queryCmd.Command("series", "Run series query.")
-    querySeriesServer := querySeriesCmd.Arg("server", "Prometheus server to query.").Required().URL()
+    querySeriesCmd.Arg("server", "Prometheus server to query.").Required().URLVar(&serverURL)
     querySeriesMatch := querySeriesCmd.Flag("match", "Series selector. Can be specified multiple times.").Required().Strings()
     querySeriesBegin := querySeriesCmd.Flag("start", "Start time (RFC3339 or Unix timestamp).").String()
     querySeriesEnd := querySeriesCmd.Flag("end", "End time (RFC3339 or Unix timestamp).").String()
@@ -153,7 +161,7 @@ func main() {
     debugAllServer := debugAllCmd.Arg("server", "Prometheus server to get all debug information from.").Required().String()

     queryLabelsCmd := queryCmd.Command("labels", "Run labels query.")
-    queryLabelsServer := queryLabelsCmd.Arg("server", "Prometheus server to query.").Required().URL()
+    queryLabelsCmd.Arg("server", "Prometheus server to query.").Required().URLVar(&serverURL)
     queryLabelsName := queryLabelsCmd.Arg("name", "Label name to provide label values for.").Required().String()
     queryLabelsBegin := queryLabelsCmd.Flag("start", "Start time (RFC3339 or Unix timestamp).").String()
     queryLabelsEnd := queryLabelsCmd.Flag("end", "End time (RFC3339 or Unix timestamp).").String()
@@ -200,7 +208,8 @@ func main() {
     importFilePath := openMetricsImportCmd.Arg("input file", "OpenMetrics file to read samples from.").Required().String()
     importDBPath := openMetricsImportCmd.Arg("output directory", "Output directory for generated blocks.").Default(defaultDBPath).String()
     importRulesCmd := importCmd.Command("rules", "Create blocks of data for new recording rules.")
-    importRulesURL := importRulesCmd.Flag("url", "The URL for the Prometheus API with the data where the rule will be backfilled from.").Default("http://localhost:9090").URL()
+    importRulesCmd.Flag("http.config.file", "HTTP client configuration file for promtool to connect to Prometheus.").PlaceHolder("<filename>").ExistingFileVar(&httpConfigFilePath)
+    importRulesCmd.Flag("url", "The URL for the Prometheus API with the data where the rule will be backfilled from.").Default("http://localhost:9090").URLVar(&serverURL)
     importRulesStart := importRulesCmd.Flag("start", "The time to start backfilling the new rule from. Must be a RFC3339 formatted date or Unix timestamp. Required.").
         Required().String()
     importRulesEnd := importRulesCmd.Flag("end", "If an end time is provided, all recording rules in the rule files provided will be backfilled to the end time. Default will backfill up to 3 hours ago. Must be a RFC3339 formatted date or Unix timestamp.").String()
@@ -224,6 +233,22 @@ func main() {
         p = &promqlPrinter{}
     }

+    if httpConfigFilePath != "" {
+        if serverURL != nil && serverURL.User.Username() != "" {
+            kingpin.Fatalf("Cannot set base auth in the server URL and use a http.config.file at the same time")
+        }
+        var err error
+        httpConfig, _, err := config_util.LoadHTTPConfigFile(httpConfigFilePath)
+        if err != nil {
+            kingpin.Fatalf("Failed to load HTTP config file: %v", err)
+        }
+
+        httpRoundTripper, err = promconfig.NewRoundTripperFromConfig(*httpConfig, "promtool", config_util.WithUserAgent("promtool/"+version.Version))
+        if err != nil {
+            kingpin.Fatalf("Failed to create a new HTTP round tripper: %v", err)
+        }
+    }
+
     var noDefaultScrapePort bool
     for _, f := range *featureList {
         opts := strings.Split(f, ",")
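The added block above loads an HTTP client configuration file (TLS, basic auth, bearer tokens, proxies) and turns it into an `http.RoundTripper` that promtool then uses for every query. A minimal, self-contained sketch of the same wiring, assuming a hypothetical config file path:

```go
package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/api"
	config_util "github.com/prometheus/common/config"
)

// buildRoundTripper returns api.DefaultRoundTripper unless an HTTP config file
// is given, in which case the round tripper is built from that configuration.
func buildRoundTripper(httpConfigFilePath string) (http.RoundTripper, error) {
	roundTripper := api.DefaultRoundTripper
	if httpConfigFilePath == "" {
		return roundTripper, nil
	}
	httpConfig, _, err := config_util.LoadHTTPConfigFile(httpConfigFilePath)
	if err != nil {
		return nil, err
	}
	return config_util.NewRoundTripperFromConfig(*httpConfig, "promtool")
}

func main() {
	// "http.config.yml" is a hypothetical path used only for illustration.
	rt, err := buildRoundTripper("http.config.yml")
	if err != nil {
		log.Fatal(err)
	}
	_ = rt // pass to api.Config{RoundTripper: rt} when building a client
}
```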
@@ -258,13 +283,13 @@ func main() {
         os.Exit(CheckMetrics(*checkMetricsExtended))

     case queryInstantCmd.FullCommand():
-        os.Exit(QueryInstant(*queryInstantServer, *queryInstantExpr, *queryInstantTime, p))
+        os.Exit(QueryInstant(serverURL, httpRoundTripper, *queryInstantExpr, *queryInstantTime, p))

     case queryRangeCmd.FullCommand():
-        os.Exit(QueryRange(*queryRangeServer, *queryRangeHeaders, *queryRangeExpr, *queryRangeBegin, *queryRangeEnd, *queryRangeStep, p))
+        os.Exit(QueryRange(serverURL, httpRoundTripper, *queryRangeHeaders, *queryRangeExpr, *queryRangeBegin, *queryRangeEnd, *queryRangeStep, p))

     case querySeriesCmd.FullCommand():
-        os.Exit(QuerySeries(*querySeriesServer, *querySeriesMatch, *querySeriesBegin, *querySeriesEnd, p))
+        os.Exit(QuerySeries(serverURL, httpRoundTripper, *querySeriesMatch, *querySeriesBegin, *querySeriesEnd, p))

     case debugPprofCmd.FullCommand():
         os.Exit(debugPprof(*debugPprofServer))
@@ -276,7 +301,7 @@ func main() {
         os.Exit(debugAll(*debugAllServer))

     case queryLabelsCmd.FullCommand():
-        os.Exit(QueryLabels(*queryLabelsServer, *queryLabelsMatch, *queryLabelsName, *queryLabelsBegin, *queryLabelsEnd, p))
+        os.Exit(QueryLabels(serverURL, httpRoundTripper, *queryLabelsMatch, *queryLabelsName, *queryLabelsBegin, *queryLabelsEnd, p))

     case testRulesCmd.FullCommand():
         os.Exit(RulesUnitTest(
@@ -303,7 +328,7 @@ func main() {
         os.Exit(backfillOpenMetrics(*importFilePath, *importDBPath, *importHumanReadable, *importQuiet, *maxBlockDuration))

     case importRulesCmd.FullCommand():
-        os.Exit(checkErr(importRules(*importRulesURL, *importRulesStart, *importRulesEnd, *importRulesOutputDir, *importRulesEvalInterval, *maxBlockDuration, *importRulesFiles...)))
+        os.Exit(checkErr(importRules(serverURL, httpRoundTripper, *importRulesStart, *importRulesEnd, *importRulesOutputDir, *importRulesEvalInterval, *maxBlockDuration, *importRulesFiles...)))
     }
 }

@@ -795,12 +820,13 @@ func checkMetricsExtended(r io.Reader) ([]metricStat, int, error) {
 }

 // QueryInstant performs an instant query against a Prometheus server.
-func QueryInstant(url *url.URL, query, evalTime string, p printer) int {
+func QueryInstant(url *url.URL, roundTripper http.RoundTripper, query, evalTime string, p printer) int {
     if url.Scheme == "" {
         url.Scheme = "http"
     }
     config := api.Config{
-        Address: url.String(),
+        Address:      url.String(),
+        RoundTripper: roundTripper,
     }

     // Create new client.
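QueryInstant (and the sibling functions below) now thread the round tripper into the client_golang API client instead of always using the default transport. A hedged sketch of that wiring with the v1 API, using a placeholder server address:

```go
package main

import (
	"context"
	"fmt"
	"log"
	"net/http"
	"time"

	"github.com/prometheus/client_golang/api"
	v1 "github.com/prometheus/client_golang/api/prometheus/v1"
)

func main() {
	// http://localhost:9090 is a placeholder server address for illustration.
	client, err := api.NewClient(api.Config{
		Address:      "http://localhost:9090",
		RoundTripper: http.DefaultTransport, // or one built from an HTTP config file
	})
	if err != nil {
		log.Fatalf("new api client error: %v", err)
	}

	v1api := v1.NewAPI(client)
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// Run a simple instant query; warnings are non-fatal.
	val, warnings, err := v1api.Query(ctx, "up", time.Now())
	if err != nil {
		log.Fatalf("query error: %v", err)
	}
	if len(warnings) > 0 {
		fmt.Println("warnings:", warnings)
	}
	fmt.Println(val.String())
}
```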
@@ -835,12 +861,13 @@ func QueryInstant(url *url.URL, query, evalTime string, p printer) int {
 }

 // QueryRange performs a range query against a Prometheus server.
-func QueryRange(url *url.URL, headers map[string]string, query, start, end string, step time.Duration, p printer) int {
+func QueryRange(url *url.URL, roundTripper http.RoundTripper, headers map[string]string, query, start, end string, step time.Duration, p printer) int {
     if url.Scheme == "" {
         url.Scheme = "http"
     }
     config := api.Config{
-        Address: url.String(),
+        Address:      url.String(),
+        RoundTripper: roundTripper,
     }

     if len(headers) > 0 {
@@ -848,7 +875,7 @@ func QueryRange(url *url.URL, headers map[string]string, query, start, end strin
         for key, value := range headers {
             req.Header.Add(key, value)
         }
-        return http.DefaultTransport.RoundTrip(req)
+        return roundTripper.RoundTrip(req)
     })
 }

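The second hunk makes the extra `--header` values ride along on the configured round tripper rather than always going through `http.DefaultTransport`. A small sketch of the same wrapping pattern; the `headerRoundTripper` type and the example header are illustrative, not promtool's own code:

```go
package main

import "net/http"

// headerRoundTripper adds a fixed set of headers to each request before
// delegating to the wrapped round tripper.
type headerRoundTripper struct {
	headers map[string]string
	next    http.RoundTripper
}

func (h headerRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
	for key, value := range h.headers {
		req.Header.Add(key, value)
	}
	return h.next.RoundTrip(req)
}

// withHeaders wraps next only when there are headers to inject.
func withHeaders(next http.RoundTripper, headers map[string]string) http.RoundTripper {
	if len(headers) == 0 {
		return next
	}
	return headerRoundTripper{headers: headers, next: next}
}

func main() {
	// "X-Scope-OrgID" is just an example header name.
	rt := withHeaders(http.DefaultTransport, map[string]string{"X-Scope-OrgID": "demo"})
	_ = rt // pass to api.Config{RoundTripper: rt} when building a client
}
```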
@@ -908,12 +935,13 @@ func QueryRange(url *url.URL, headers map[string]string, query, start, end strin
 }

 // QuerySeries queries for a series against a Prometheus server.
-func QuerySeries(url *url.URL, matchers []string, start, end string, p printer) int {
+func QuerySeries(url *url.URL, roundTripper http.RoundTripper, matchers []string, start, end string, p printer) int {
     if url.Scheme == "" {
         url.Scheme = "http"
     }
     config := api.Config{
-        Address: url.String(),
+        Address:      url.String(),
+        RoundTripper: roundTripper,
     }

     // Create new client.
@@ -944,12 +972,13 @@ func QuerySeries(url *url.URL, matchers []string, start, end string, p printer)
 }

 // QueryLabels queries for label values against a Prometheus server.
-func QueryLabels(url *url.URL, matchers []string, name, start, end string, p printer) int {
+func QueryLabels(url *url.URL, roundTripper http.RoundTripper, matchers []string, name, start, end string, p printer) int {
     if url.Scheme == "" {
         url.Scheme = "http"
     }
     config := api.Config{
-        Address: url.String(),
+        Address:      url.String(),
+        RoundTripper: roundTripper,
     }

     // Create new client.
@@ -1154,7 +1183,7 @@ func (j *jsonPrinter) printLabelValues(v model.LabelValues) {

 // importRules backfills recording rules from the files provided. The output are blocks of data
 // at the outputDir location.
-func importRules(url *url.URL, start, end, outputDir string, evalInterval, maxBlockDuration time.Duration, files ...string) error {
+func importRules(url *url.URL, roundTripper http.RoundTripper, start, end, outputDir string, evalInterval, maxBlockDuration time.Duration, files ...string) error {
     ctx := context.Background()
     var stime, etime time.Time
     var err error
@@ -1184,7 +1213,8 @@ func importRules(url *url.URL, start, end, outputDir string, evalInterval, maxBl
         maxBlockDuration: maxBlockDuration,
     }
     client, err := api.NewClient(api.Config{
-        Address: url.String(),
+        Address:      url.String(),
+        RoundTripper: roundTripper,
     })
     if err != nil {
         return fmt.Errorf("new api client error: %w", err)
@@ -56,14 +56,14 @@ func TestQueryRange(t *testing.T) {
     require.Equal(t, nil, err)

     p := &promqlPrinter{}
-    exitCode := QueryRange(urlObject, map[string]string{}, "up", "0", "300", 0, p)
+    exitCode := QueryRange(urlObject, http.DefaultTransport, map[string]string{}, "up", "0", "300", 0, p)
     require.Equal(t, "/api/v1/query_range", getRequest().URL.Path)
     form := getRequest().Form
     require.Equal(t, "up", form.Get("query"))
     require.Equal(t, "1", form.Get("step"))
     require.Equal(t, 0, exitCode)

-    exitCode = QueryRange(urlObject, map[string]string{}, "up", "0", "300", 10*time.Millisecond, p)
+    exitCode = QueryRange(urlObject, http.DefaultTransport, map[string]string{}, "up", "0", "300", 10*time.Millisecond, p)
     require.Equal(t, "/api/v1/query_range", getRequest().URL.Path)
     form = getRequest().Form
     require.Equal(t, "up", form.Get("query"))
@@ -79,7 +79,7 @@ func TestQueryInstant(t *testing.T) {
     require.Equal(t, nil, err)

     p := &promqlPrinter{}
-    exitCode := QueryInstant(urlObject, "up", "300", p)
+    exitCode := QueryInstant(urlObject, http.DefaultTransport, "up", "300", p)
     require.Equal(t, "/api/v1/query", getRequest().URL.Path)
     form := getRequest().Form
     require.Equal(t, "up", form.Get("query"))
@@ -365,7 +365,7 @@ func TestGetDatacenterShouldReturnError(t *testing.T) {
         {
             // Define a handler that will return status 500.
             handler: func(w http.ResponseWriter, r *http.Request) {
-                w.WriteHeader(500)
+                w.WriteHeader(http.StatusInternalServerError)
             },
             errMessage: "Unexpected response code: 500 ()",
         },
@@ -74,16 +74,16 @@ func createTestHTTPServer(t *testing.T, responder discoveryResponder) *httptest.

         discoveryRes, err := responder(discoveryReq)
         if err != nil {
-            w.WriteHeader(500)
+            w.WriteHeader(http.StatusInternalServerError)
             return
         }

         if discoveryRes == nil {
-            w.WriteHeader(304)
+            w.WriteHeader(http.StatusNotModified)
             return
         }

-        w.WriteHeader(200)
+        w.WriteHeader(http.StatusOK)
         data, err := protoJSONMarshalOptions.Marshal(discoveryRes)
         require.NoError(t, err)

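The test changes above (and the one before them) simply swap bare numeric status codes for the named net/http constants. A tiny self-contained sketch of the same style in an httptest handler; the handler itself is only illustrative:

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
)

func main() {
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Named constants read better than bare 200/304/500 literals.
		if r.URL.Query().Get("fail") != "" {
			w.WriteHeader(http.StatusInternalServerError)
			return
		}
		w.WriteHeader(http.StatusOK)
		fmt.Fprintln(w, "ok")
	}))
	defer srv.Close()

	resp, err := http.Get(srv.URL)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.StatusCode, string(body))
}
```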
@@ -123,6 +123,10 @@ expr: <string>
 # Alerts which have not yet fired for long enough are considered pending.
 [ for: <duration> | default = 0s ]

+# How long an alert will continue firing after the condition that triggered it
+# has cleared.
+[ keep_firing_for: <duration> | default = 0s ]
+
 # Labels to add or overwrite for each alert.
 labels:
   [ <labelname>: <tmpl_string> ]

@@ -449,9 +449,7 @@ raw numbers.

 The keys `"histogram"` and `"histograms"` only show up if the experimental
 native histograms are present in the response. Their placeholder `<histogram>`
-is explained in detail in its own section below. Any one object will only have
-the `"value"`/`"values"` key or the `"histogram"`/`"histograms"` key, but not
-both.
+is explained in detail in its own section below.

 ### Range vectors

@@ -469,6 +467,9 @@ Range vectors are returned as result type `matrix`. The corresponding
 ]
 ```

+Each series could have the `"values"` key, or the `"histograms"` key, or both.
+For a given timestamp, there will only be one sample of either float or histogram type.
+
 ### Instant vectors

 Instant vectors are returned as result type `vector`. The corresponding
@@ -485,6 +486,8 @@ Instant vectors are returned as result type `vector`. The corresponding
 ]
 ```

+Each series could have the `"value"` key, or the `"histogram"` key, but not both.
+
 ### Scalars

 Scalar results are returned as result type `scalar`. The corresponding

@@ -6,8 +6,8 @@ require (
     github.com/go-kit/log v0.2.1
     github.com/gogo/protobuf v1.3.2
     github.com/golang/snappy v0.0.4
-    github.com/influxdata/influxdb v1.10.0
-    github.com/prometheus/client_golang v1.13.1
+    github.com/influxdata/influxdb v1.11.0
+    github.com/prometheus/client_golang v1.14.0
     github.com/prometheus/common v0.37.0
     github.com/stretchr/testify v1.8.1
     gopkg.in/alecthomas/kingpin.v2 v2.2.6
@@ -35,7 +35,7 @@ require (
     github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect
     github.com/pkg/errors v0.9.1 // indirect
     github.com/pmezard/go-difflib v1.0.0 // indirect
-    github.com/prometheus/client_model v0.2.0 // indirect
+    github.com/prometheus/client_model v0.3.0 // indirect
     github.com/prometheus/common/sigv4 v0.1.0 // indirect
     github.com/prometheus/procfs v0.8.0 // indirect
     go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.34.0 // indirect

@@ -81,6 +81,7 @@ github.com/golang-jwt/jwt/v4 v4.2.0 h1:besgBTC8w8HjP6NzQdxwKH9Z5oQMZ24ThTrHp3cZ8
 github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
 github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
 github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
 github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
 github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
 github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
@@ -119,8 +120,8 @@ github.com/hashicorp/nomad/api v0.0.0-20220629141207-c2428e1673ec h1:jAF71e0KoaY
 github.com/hashicorp/serf v0.9.6 h1:uuEX1kLR6aoda1TBttmJQKDLZE1Ob7KN0NPdE7EtCDc=
 github.com/hetznercloud/hcloud-go v1.35.2 h1:eEDtmDiI2plZ2UQmj4YpiYse5XbtpXOUBpAdIOLxzgE=
 github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU=
-github.com/influxdata/influxdb v1.10.0 h1:8xDpt8KO3lzrzf/ss+l8r42AGUZvoITu5824berK7SE=
-github.com/influxdata/influxdb v1.10.0/go.mod h1:IVPuoA2pOOxau/NguX7ipW0Jp9Bn+dMWlo0+VOscevU=
+github.com/influxdata/influxdb v1.11.0 h1:0X+ZsbcOWc6AEi5MHee9BYqXCKmz8IZsljrRYjmV8Qg=
+github.com/influxdata/influxdb v1.11.0/go.mod h1:V93tJcidY0Zh0LtSONZWnXXGDyt20dtVf+Ddp4EnhaA=
 github.com/ionos-cloud/sdk-go/v6 v6.1.2 h1:es5R5sVmjHFrYNBbJfAeHF+16GheaJMyc63xWxIAec4=
 github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
 github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
@@ -184,12 +185,13 @@ github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5Fsn
 github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
 github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
 github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
-github.com/prometheus/client_golang v1.13.1 h1:3gMjIY2+/hzmqhtUC/aQNYldJA6DtH3CgQvwS+02K1c=
-github.com/prometheus/client_golang v1.13.1/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ=
+github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw=
+github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y=
 github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
 github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
 github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4=
+github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w=
 github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
 github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
 github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=

go.mod (68 changes)

@@ -5,14 +5,14 @@ go 1.18
 require (
     github.com/Azure/azure-sdk-for-go v65.0.0+incompatible
     github.com/Azure/go-autorest/autorest v0.11.28
-    github.com/Azure/go-autorest/autorest/adal v0.9.21
+    github.com/Azure/go-autorest/autorest/adal v0.9.22
     github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137
-    github.com/aws/aws-sdk-go v1.44.159
+    github.com/aws/aws-sdk-go v1.44.187
     github.com/cespare/xxhash/v2 v2.2.0
     github.com/dennwc/varint v1.0.0
     github.com/dgryski/go-sip13 v0.0.0-20200911182023-62edffca9245
-    github.com/digitalocean/godo v1.91.1
-    github.com/docker/docker v20.10.21+incompatible
+    github.com/digitalocean/godo v1.95.0
+    github.com/docker/docker v20.10.23+incompatible
     github.com/edsrzf/mmap-go v1.1.0
     github.com/envoyproxy/go-control-plane v0.10.3
     github.com/envoyproxy/protoc-gen-validate v0.9.1
@@ -23,31 +23,31 @@ require (
     github.com/go-zookeeper/zk v1.0.3
     github.com/gogo/protobuf v1.3.2
     github.com/golang/snappy v0.0.4
-    github.com/google/pprof v0.0.0-20221212185716-aee1124e3a93
+    github.com/google/pprof v0.0.0-20230111200839-76d1ae5aea2b
     github.com/gophercloud/gophercloud v1.1.1
     github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd
     github.com/grpc-ecosystem/grpc-gateway v1.16.0
     github.com/hashicorp/consul/api v1.18.0
-    github.com/hashicorp/nomad/api v0.0.0-20221214074818-7dbbf6bc584d
+    github.com/hashicorp/nomad/api v0.0.0-20230124213148-69fd1a0e4bf7
     github.com/hetznercloud/hcloud-go v1.39.0
     github.com/ionos-cloud/sdk-go/v6 v6.1.3
     github.com/json-iterator/go v1.1.12
     github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b
-    github.com/linode/linodego v1.9.3
+    github.com/linode/linodego v1.12.0
     github.com/miekg/dns v1.1.50
     github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f
     github.com/oklog/run v1.1.0
     github.com/oklog/ulid v1.3.1
     github.com/ovh/go-ovh v1.3.0
     github.com/pkg/errors v0.9.1
-    github.com/prometheus/alertmanager v0.24.0
+    github.com/prometheus/alertmanager v0.25.0
     github.com/prometheus/client_golang v1.14.0
     github.com/prometheus/client_model v0.3.0
     github.com/prometheus/common v0.39.0
     github.com/prometheus/common/assets v0.2.0
     github.com/prometheus/common/sigv4 v0.1.0
     github.com/prometheus/exporter-toolkit v0.8.2
-    github.com/scaleway/scaleway-sdk-go v1.0.0-beta.10
+    github.com/scaleway/scaleway-sdk-go v1.0.0-beta.12
     github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749
     github.com/stretchr/testify v1.8.1
     github.com/vultr/govultr/v2 v2.17.2
@@ -61,33 +61,35 @@ require (
     go.uber.org/atomic v1.10.0
     go.uber.org/automaxprocs v1.5.1
     go.uber.org/goleak v1.2.0
-    golang.org/x/net v0.4.0
-    golang.org/x/oauth2 v0.3.0
+    golang.org/x/net v0.5.0
+    golang.org/x/oauth2 v0.4.0
     golang.org/x/sync v0.1.0
-    golang.org/x/sys v0.3.0
+    golang.org/x/sys v0.4.0
     golang.org/x/time v0.3.0
-    golang.org/x/tools v0.4.0
-    google.golang.org/api v0.104.0
-    google.golang.org/genproto v0.0.0-20221207170731-23e4bf6bdc37
-    google.golang.org/grpc v1.51.0
+    golang.org/x/tools v0.5.0
+    google.golang.org/api v0.108.0
+    google.golang.org/genproto v0.0.0-20230124163310-31e0e69b6fc2
+    google.golang.org/grpc v1.52.1
     google.golang.org/protobuf v1.28.1
     gopkg.in/alecthomas/kingpin.v2 v2.2.6
     gopkg.in/yaml.v2 v2.4.0
     gopkg.in/yaml.v3 v3.0.1
-    k8s.io/api v0.26.0
-    k8s.io/apimachinery v0.26.0
-    k8s.io/client-go v0.26.0
+    k8s.io/api v0.26.1
+    k8s.io/apimachinery v0.26.1
+    k8s.io/client-go v0.26.1
     k8s.io/klog v1.0.0
     k8s.io/klog/v2 v2.80.1
 )

 require (
-    cloud.google.com/go/compute/metadata v0.2.2 // indirect
+    cloud.google.com/go/compute/metadata v0.2.3 // indirect
     github.com/coreos/go-systemd/v22 v22.4.0 // indirect
     github.com/hashicorp/errwrap v1.0.0 // indirect
     github.com/hashicorp/go-multierror v1.1.1 // indirect
 )

 require (
-    cloud.google.com/go/compute v1.13.0 // indirect
+    cloud.google.com/go/compute v1.14.0 // indirect
     github.com/Azure/go-autorest v14.2.0+incompatible // indirect
     github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect
     github.com/Azure/go-autorest/autorest/to v0.4.0 // indirect
@@ -113,14 +115,14 @@ require (
     github.com/go-kit/kit v0.12.0 // indirect
     github.com/go-logr/logr v1.2.3 // indirect
     github.com/go-logr/stdr v1.2.2 // indirect
-    github.com/go-openapi/analysis v0.21.2 // indirect
-    github.com/go-openapi/errors v0.20.2 // indirect
+    github.com/go-openapi/analysis v0.21.4 // indirect
+    github.com/go-openapi/errors v0.20.3 // indirect
     github.com/go-openapi/jsonpointer v0.19.5 // indirect
     github.com/go-openapi/jsonreference v0.20.0 // indirect
-    github.com/go-openapi/loads v0.21.1 // indirect
-    github.com/go-openapi/spec v0.20.4 // indirect
-    github.com/go-openapi/swag v0.21.1 // indirect
-    github.com/go-openapi/validate v0.21.0 // indirect
+    github.com/go-openapi/loads v0.21.2 // indirect
+    github.com/go-openapi/spec v0.20.7 // indirect
+    github.com/go-openapi/swag v0.22.3 // indirect
+    github.com/go-openapi/validate v0.22.0 // indirect
     github.com/go-resty/resty/v2 v2.1.1-0.20191201195748-d7b97669fe48 // indirect
     github.com/golang-jwt/jwt/v4 v4.2.0 // indirect
     github.com/golang/glog v1.0.0 // indirect
@@ -131,7 +133,7 @@ require (
     github.com/google/go-querystring v1.1.0 // indirect
     github.com/google/gofuzz v1.2.0 // indirect
     github.com/google/uuid v1.3.0 // indirect
-    github.com/googleapis/enterprise-certificate-proxy v0.2.0 // indirect
+    github.com/googleapis/enterprise-certificate-proxy v0.2.1 // indirect
     github.com/googleapis/gax-go/v2 v2.7.0 // indirect
     github.com/gorilla/websocket v1.5.0 // indirect
     github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.1 // indirect
@@ -141,7 +143,7 @@ require (
     github.com/hashicorp/go-immutable-radix v1.3.1 // indirect
     github.com/hashicorp/go-retryablehttp v0.7.1 // indirect
     github.com/hashicorp/go-rootcerts v1.0.2 // indirect
-    github.com/hashicorp/golang-lru v0.5.4 // indirect
+    github.com/hashicorp/golang-lru v0.6.0 // indirect
     github.com/hashicorp/serf v0.10.1 // indirect
     github.com/imdario/mergo v0.3.12 // indirect
     github.com/jmespath/go-jmespath v0.4.0 // indirect
@@ -165,16 +167,16 @@ require (
     github.com/prometheus/procfs v0.8.0 // indirect
     github.com/sirupsen/logrus v1.8.1 // indirect
     github.com/spf13/pflag v1.0.5 // indirect
-    go.mongodb.org/mongo-driver v1.10.2 // indirect
+    go.mongodb.org/mongo-driver v1.11.0 // indirect
     go.opencensus.io v0.24.0 // indirect
     go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.11.2 // indirect
     go.opentelemetry.io/otel/metric v0.34.0 // indirect
     go.opentelemetry.io/proto/otlp v0.19.0 // indirect
     golang.org/x/crypto v0.1.0 // indirect
-    golang.org/x/exp v0.0.0-20221212164502-fae10dda9338
+    golang.org/x/exp v0.0.0-20230124195608-d38c7dcee874
     golang.org/x/mod v0.7.0 // indirect
-    golang.org/x/term v0.3.0 // indirect
-    golang.org/x/text v0.5.0 // indirect
+    golang.org/x/term v0.4.0 // indirect
+    golang.org/x/text v0.6.0 // indirect
     google.golang.org/appengine v1.6.7 // indirect
     gopkg.in/inf.v0 v0.9.1 // indirect
     gopkg.in/ini.v1 v1.66.6 // indirect

go.sum (182 changes)

@@ -19,10 +19,10 @@ cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvf
 cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
 cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
 cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
-cloud.google.com/go/compute v1.13.0 h1:AYrLkB8NPdDRslNp4Jxmzrhdr03fUAIDbiGFjLWowoU=
-cloud.google.com/go/compute v1.13.0/go.mod h1:5aPTS0cUNMIc1CE546K+Th6weJUNQErARyZtRXDJ8GE=
-cloud.google.com/go/compute/metadata v0.2.2 h1:aWKAjYaBaOSrpKl57+jnS/3fJRQnxL7TvR/u1VVbt6k=
-cloud.google.com/go/compute/metadata v0.2.2/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM=
+cloud.google.com/go/compute v1.14.0 h1:hfm2+FfxVmnRlh6LpB7cg1ZNU+5edAHmW679JePztk0=
+cloud.google.com/go/compute v1.14.0/go.mod h1:YfLtxrj9sU4Yxv+sXzZkyPjEyPBZfXHUvjxega5vAdo=
+cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY=
+cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA=
 cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
 cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
 cloud.google.com/go/longrunning v0.3.0 h1:NjljC+FYPV3uh5/OwWT6pVU+doBqMg2x/rZlE+CamDs=
@@ -45,8 +45,8 @@ github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSW
 github.com/Azure/go-autorest/autorest v0.11.28 h1:ndAExarwr5Y+GaHE6VCaY1kyS/HwwGGyuimVhWsHOEM=
 github.com/Azure/go-autorest/autorest v0.11.28/go.mod h1:MrkzG3Y3AH668QyF9KRk5neJnGgmhQ6krbhR8Q5eMvA=
 github.com/Azure/go-autorest/autorest/adal v0.9.18/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ=
-github.com/Azure/go-autorest/autorest/adal v0.9.21 h1:jjQnVFXPfekaqb8vIsv2G1lxshoW+oGv4MDlhRtnYZk=
-github.com/Azure/go-autorest/autorest/adal v0.9.21/go.mod h1:zua7mBUaCc5YnSLKYgGJR/w5ePdMDA6H56upLsHzA9U=
+github.com/Azure/go-autorest/autorest/adal v0.9.22 h1:/GblQdIudfEM3AWWZ0mrYJQSd7JS4S/Mbzh6F0ov0Xc=
+github.com/Azure/go-autorest/autorest/adal v0.9.22/go.mod h1:XuAbAEUv2Tta//+voMI038TrJBqjKam0me7qR+L8Cmk=
 github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw=
 github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=
 github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
@@ -97,9 +97,8 @@ github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d/go.mod h1:W
 github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU=
 github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
 github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
-github.com/aws/aws-sdk-go v1.43.11/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
-github.com/aws/aws-sdk-go v1.44.159 h1:9odtuHAYQE9tQKyuX6ny1U1MHeH5/yzeCJi96g9H4DU=
-github.com/aws/aws-sdk-go v1.44.159/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
+github.com/aws/aws-sdk-go v1.44.187 h1:D5CsRomPnlwDHJCanL2mtaLIcbhjiWxNh5j8zvaWdJA=
+github.com/aws/aws-sdk-go v1.44.187/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
 github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
 github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
 github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
@@ -108,14 +107,12 @@ github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6r
 github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
 github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ=
 github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
 github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
 github.com/cenkalti/backoff/v4 v4.2.0 h1:HN5dHm3WBOgndBH6E8V0q2jIYIR3s9yglV8k/+MN3u4=
 github.com/cenkalti/backoff/v4 v4.2.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
 github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
 github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
 github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
 github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
 github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
 github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
 github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
 github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
@@ -153,16 +150,15 @@ github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgz
 github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
 github.com/dgryski/go-sip13 v0.0.0-20200911182023-62edffca9245 h1:9cOfvEwjQxdwKuNDTQSaMKNRvwKwgZG+U4HrjeRKHso=
 github.com/dgryski/go-sip13 v0.0.0-20200911182023-62edffca9245/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
-github.com/digitalocean/godo v1.91.1 h1:1o30VOCu1aC6488qBd0SkQiBeAZ35RSTvLwCA1pQMhc=
-github.com/digitalocean/godo v1.91.1/go.mod h1:NRpFznZFvhHjBoqZAaOD3khVzsJ3EibzKqFL4R60dmA=
+github.com/digitalocean/godo v1.95.0 h1:S48/byPKui7RHZc1wYEPfRvkcEvToADNb5I3guu95xg=
+github.com/digitalocean/godo v1.95.0/go.mod h1:NRpFznZFvhHjBoqZAaOD3khVzsJ3EibzKqFL4R60dmA=
 github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=
 github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68=
 github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
-github.com/docker/docker v20.10.21+incompatible h1:UTLdBmHk3bEY+w8qeO5KttOhy6OmXWsl/FEet9Uswog=
-github.com/docker/docker v20.10.21+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v20.10.23+incompatible h1:1ZQUUYAdh+oylOT85aA2ZcfRp22jmLhoaEcVEfK8dyA=
+github.com/docker/docker v20.10.23+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
 github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
 github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
-github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
 github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
 github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
 github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
@@ -192,7 +188,6 @@ github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH
 github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
 github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
 github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
 github.com/fatih/color v1.10.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM=
 github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w=
 github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk=
 github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk=
@@ -213,7 +208,6 @@ github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgO
 github.com/go-kit/kit v0.12.0 h1:e4o3o3IsBfAKQh5Qbbiqyfu97Ku7jrO/JbohvztANh4=
 github.com/go-kit/kit v0.12.0/go.mod h1:lHd+EkCZPIwYItmGDDRdhinkzX2A1sj+M9biaEaizzs=
 github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
 github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0=
 github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU=
 github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0=
 github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
@@ -226,43 +220,42 @@ github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0=
 github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
 github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
 github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
-github.com/go-openapi/analysis v0.21.2 h1:hXFrOYFHUAMQdu6zwAiKKJHJQ8kqZs1ux/ru1P1wLJU=
-github.com/go-openapi/analysis v0.21.2/go.mod h1:HZwRk4RRisyG8vx2Oe6aqeSQcoxRp47Xkp3+K6q+LdY=
+github.com/go-openapi/analysis v0.21.4 h1:ZDFLvSNxpDaomuCueM0BlSXxpANBlFYiBvr+GXrvIHc=
+github.com/go-openapi/analysis v0.21.4/go.mod h1:4zQ35W4neeZTqh3ol0rv/O8JBbka9QyAgQRPp9y3pfo=
 github.com/go-openapi/errors v0.19.8/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M=
 github.com/go-openapi/errors v0.19.9/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M=
-github.com/go-openapi/errors v0.20.2 h1:dxy7PGTqEh94zj2E3h1cUmQQWiM1+aeCROfAr02EmK8=
-github.com/go-openapi/errors v0.20.2/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M=
+github.com/go-openapi/errors v0.20.3 h1:rz6kiC84sqNQoqrtulzaL/VERgkoCyB6WdEkc2ujzUc=
+github.com/go-openapi/errors v0.20.3/go.mod h1:Z3FlZ4I8jEGxjUK+bugx3on2mIAk4txuAOhlsB1FSgk=
 github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
 github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY=
 github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
 github.com/go-openapi/jsonreference v0.19.6/go.mod h1:diGHMEHg2IqXZGKxqyvWdfWU/aim5Dprw5bqpKkTvns=
 github.com/go-openapi/jsonreference v0.20.0 h1:MYlu0sBgChmCfJxxUKZ8g1cPWFOB37YSZqewK7OKeyA=
 github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo=
-github.com/go-openapi/loads v0.21.1 h1:Wb3nVZpdEzDTcly8S4HMkey6fjARRzb7iEaySimlDW0=
-github.com/go-openapi/loads v0.21.1/go.mod h1:/DtAMXXneXFjbQMGEtbamCZb+4x7eGwkvZCvBmwUG+g=
 github.com/go-openapi/runtime v0.23.1/go.mod h1:AKurw9fNre+h3ELZfk6ILsfvPN+bvvlaU/M9q/r9hpk=
-github.com/go-openapi/spec v0.20.4 h1:O8hJrt0UMnhHcluhIdUgCLRWyM2x7QkBXRvOs7m+O1M=
+github.com/go-openapi/loads v0.21.2 h1:r2a/xFIYeZ4Qd2TnGpWDIQNcP80dIaZgf704za8enro=
+github.com/go-openapi/loads v0.21.2/go.mod h1:Jq58Os6SSGz0rzh62ptiu8Z31I+OTHqmULx5e/gJbNw=
 github.com/go-openapi/spec v0.20.4/go.mod h1:faYFR1CvsJZ0mNsmsphTMSoRrNV3TEDoAM7FOEWeq8I=
+github.com/go-openapi/spec v0.20.6/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA=
+github.com/go-openapi/spec v0.20.7 h1:1Rlu/ZrOCCob0n+JKKJAWhNWMPW8bOZRg8FJaY+0SKI=
+github.com/go-openapi/spec v0.20.7/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA=
 github.com/go-openapi/strfmt v0.21.0/go.mod h1:ZRQ409bWMj+SOgXofQAGTIo2Ebu72Gs+WaRADcS5iNg=
 github.com/go-openapi/strfmt v0.21.1/go.mod h1:I/XVKeLc5+MM5oPNN7P6urMOpuLXEcNrCX/rPGuWb0k=
 github.com/go-openapi/strfmt v0.21.2/go.mod h1:I/XVKeLc5+MM5oPNN7P6urMOpuLXEcNrCX/rPGuWb0k=
 github.com/go-openapi/strfmt v0.21.3 h1:xwhj5X6CjXEZZHMWy1zKJxvW9AfHC9pkyUjLvHtKG7o=
 github.com/go-openapi/strfmt v0.21.3/go.mod h1:k+RzNO0Da+k3FrrynSNN8F7n/peCmQQqbbXjtDfvmGg=
 github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
 github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
-github.com/go-openapi/swag v0.21.1 h1:wm0rhTb5z7qpJRHBdPOMuY4QjVUMbF6/kwoYeRAOrKU=
-github.com/go-openapi/swag v0.21.1/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
-github.com/go-openapi/validate v0.21.0 h1:+Wqk39yKOhfpLqNLEC0/eViCkzM5FVXVqrvt526+wcI=
-github.com/go-openapi/validate v0.21.0/go.mod h1:rjnrwK57VJ7A8xqfpAOEKRH8yQSGUriMu5/zuPSQ1hg=
-github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
-github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8=
-github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA=
-github.com/go-playground/validator/v10 v10.4.1/go.mod h1:nlOn6nFhuKACm19sB/8EGNn9GlaMV7XkbRSipzJ0Ii4=
+github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g=
+github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
+github.com/go-openapi/validate v0.22.0 h1:b0QecH6VslW/TxtpKgzpO1SNG7GU2FsaqKdP1E2T50Y=
+github.com/go-openapi/validate v0.22.0/go.mod h1:rjnrwK57VJ7A8xqfpAOEKRH8yQSGUriMu5/zuPSQ1hg=
 github.com/go-resty/resty/v2 v2.1.1-0.20191201195748-d7b97669fe48 h1:JVrqSeQfdhYRFk24TvhTZWU0q8lfCojxZQFi3Ou7+uY=
 github.com/go-resty/resty/v2 v2.1.1-0.20191201195748-d7b97669fe48/go.mod h1:dZGr0i9PLlaaTD4H/hoZIDjQ+r6xq8mgbRzHZf7f2J8=
 github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
 github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
 github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4=
 github.com/go-zookeeper/zk v1.0.3 h1:7M2kwOsc//9VeeFiPtf+uSJlVpU66x9Ba5+8XK7/TDg=
 github.com/go-zookeeper/zk v1.0.3/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw=
 github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0=
@@ -289,9 +282,7 @@ github.com/gobuffalo/packd v0.1.0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWe
 github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGtJQZ0Odn4pQ=
 github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0=
 github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw=
-github.com/goccy/go-yaml v1.9.5/go.mod h1:U/jl18uSupI5rdI2jmuCswEA2htH9eXfferR3KfscvA=
 github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
-github.com/gofrs/uuid v4.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
 github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s=
 github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
 github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
@@ -372,16 +363,16 @@ github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hf
 github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
 github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
 github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20221212185716-aee1124e3a93 h1:D5iJJZKAi0rU4e/5E58BkrnN+xeCDjAIqcm1GGxAGSI=
-github.com/google/pprof v0.0.0-20221212185716-aee1124e3a93/go.mod h1:dDKJzRmX4S37WGHujM7tX//fmj1uioxKzKxz3lo4HJo=
+github.com/google/pprof v0.0.0-20230111200839-76d1ae5aea2b h1:8htHrh2bw9c7Idkb7YNac+ZpTqLMjRpI+FWu51ltaQc=
+github.com/google/pprof v0.0.0-20230111200839-76d1ae5aea2b/go.mod h1:dDKJzRmX4S37WGHujM7tX//fmj1uioxKzKxz3lo4HJo=
 github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
 github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
 github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/googleapis/enterprise-certificate-proxy v0.2.0 h1:y8Yozv7SZtlU//QXbezB6QkpuE6jMD2/gfzk4AftXjs=
-github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg=
+github.com/googleapis/enterprise-certificate-proxy v0.2.1 h1:RY7tHKZcRlk788d5WSo/e83gOyyy742E8GSs771ySpg=
+github.com/googleapis/enterprise-certificate-proxy v0.2.1/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k=
 github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
 github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
 github.com/googleapis/gax-go/v2 v2.7.0 h1:IcsPKeInNvYi7eqSaDjiZqDDKu5rsmunY0Y1YupQSSQ=
@@ -429,8 +420,9 @@ github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjh
 github.com/hashicorp/go-msgpack v0.5.3 h1:zKjpN5BK/P5lMYrLmBHdBULWbJ0XpYR+7NGzqkZzoD4=
 github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
 github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
-github.com/hashicorp/go-multierror v1.1.0 h1:B9UzwGQJehnUY1yNrnwREHc3fGbC2xefo8g4TbElacI=
 github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA=
+github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
+github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
 github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
 github.com/hashicorp/go-retryablehttp v0.7.1 h1:sUiuQAnLlbvmExtFQs72iFW/HXeUn8Z1aJLQ4LJJbTQ=
 github.com/hashicorp/go-retryablehttp v0.7.1/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY=
@@ -451,17 +443,17 @@ github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09
 github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
 github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
 github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
-github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc=
-github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
+github.com/hashicorp/golang-lru v0.6.0 h1:uL2shRDx7RTrOrTCUZEGP/wJUFiUI8QT6E7z5o8jga4=
+github.com/hashicorp/golang-lru v0.6.0/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
 github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
 github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
 github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc=
 github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
 github.com/hashicorp/memberlist v0.3.1/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE=
 github.com/hashicorp/memberlist v0.5.0 h1:EtYPN8DpAURiapus508I4n9CzHs2W+8NZGbmmR/prTM=
 github.com/hashicorp/memberlist v0.5.0/go.mod h1:yvyXLpo0QaGE59Y7hDTsTzDD25JYBZ4mHgHUZ8lrOI0=
-github.com/hashicorp/nomad/api v0.0.0-20221214074818-7dbbf6bc584d h1:kEWrUx7mld3c6HRcO2KhfD1MYBkofuZfEfDwCRQ9aMU=
-github.com/hashicorp/nomad/api v0.0.0-20221214074818-7dbbf6bc584d/go.mod h1:8FB4gnSJAfRGxfG+v0pZEPfqhZG7nZ87xDeUyw3gEMI=
+github.com/hashicorp/nomad/api v0.0.0-20230124213148-69fd1a0e4bf7 h1:XOdd3JHyeQnBRxotBo9ibxBFiYGuYhQU25s/YeV2cTU=
+github.com/hashicorp/nomad/api v0.0.0-20230124213148-69fd1a0e4bf7/go.mod h1:xYYd4dybIhRhhzDemKx7Ddt8CvCosgrEek8YM7/cF0A=
 github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
 github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY=
 github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4=
@@ -477,7 +469,6 @@ github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANyt
 github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo=
 github.com/ionos-cloud/sdk-go/v6 v6.1.3 h1:vb6yqdpiqaytvreM0bsn2pXw+1YDvEk2RKSmBAQvgDQ=
 github.com/ionos-cloud/sdk-go/v6 v6.1.3/go.mod h1:Ox3W0iiEz0GHnfY9e5LmAxwklsxguuNFEUSu0gVRTME=
-github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4=
 github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
 github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
 github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
@@ -518,18 +509,16 @@ github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
 github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
 github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
 github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
-github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI=
-github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
 github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
 github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
 github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
 github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
 github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
-github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII=
 github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM=
 github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4=
-github.com/linode/linodego v1.9.3 h1:+lxNZw4avRxhCqGjwfPgQ2PvMT+vOL0OMsTdzixR7hQ=
-github.com/linode/linodego v1.9.3/go.mod h1:h6AuFR/JpqwwM/vkj7s8KV3iGN8/jxn+zc437F8SZ8w=
+github.com/linode/linodego v1.12.0 h1:33mOIrZ+gVva14gyJMKPZ85mQGovAvZCEP1ftgmFBjA=
|
||||
github.com/linode/linodego v1.12.0/go.mod h1:NJlzvlNtdMRRkXb0oN6UWzUkj6t+IBsyveHgZ5Ppjyk=
|
||||
github.com/lyft/protoc-gen-star v0.6.0/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA=
|
||||
github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ=
|
||||
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||
|
@ -542,7 +531,6 @@ github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kN
|
|||
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
|
||||
github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
|
||||
github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
|
||||
github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
|
||||
github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
|
||||
github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40=
|
||||
github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4=
|
||||
|
@ -577,7 +565,6 @@ github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:F
|
|||
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
|
||||
github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
||||
github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
||||
github.com/mitchellh/mapstructure v1.4.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
||||
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
|
||||
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
||||
github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 h1:dcztxKSvZ4Id8iPpHERQBbIJfabdt4wUm5qy3wOL2Zc=
|
||||
|
@ -627,7 +614,6 @@ github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go
|
|||
github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74=
|
||||
github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
|
||||
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
|
||||
github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc=
|
||||
github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxSfWAKL3wpBW7V8scJMt8N8gnaMCS9E/cA=
|
||||
github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw=
|
||||
github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=
|
||||
|
@ -654,8 +640,8 @@ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN
|
|||
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
|
||||
github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s=
|
||||
github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g=
|
||||
github.com/prometheus/alertmanager v0.24.0 h1:HBWR3lk4uy3ys+naDZthDdV7yEsxpaNeZuUS+hJgrOw=
|
||||
github.com/prometheus/alertmanager v0.24.0/go.mod h1:r6fy/D7FRuZh5YbnX6J3MBY0eI4Pb5yPYS7/bPSXXqI=
|
||||
github.com/prometheus/alertmanager v0.25.0 h1:vbXKUR6PYRiZPRIKfmXaG+dmCKG52RtPL4Btl8hQGvg=
|
||||
github.com/prometheus/alertmanager v0.25.0/go.mod h1:MEZ3rFVHqKZsw7IcNS/m4AWZeXThmJhumpiWR4eHU/w=
|
||||
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
||||
github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs=
|
||||
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
|
||||
|
@ -663,7 +649,6 @@ github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeD
|
|||
github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
|
||||
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
|
||||
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
|
||||
github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
|
||||
github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw=
|
||||
github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y=
|
||||
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
|
@ -681,14 +666,12 @@ github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8b
|
|||
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
|
||||
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
|
||||
github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
|
||||
github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
|
||||
github.com/prometheus/common v0.39.0 h1:oOyhkDq05hPZKItWVBkJ6g6AtGxi+fy7F4JvUV8uhsI=
|
||||
github.com/prometheus/common v0.39.0/go.mod h1:6XBZ7lYdLCbkAVhwRsWTZn+IN5AB9F/NXd5w0BbEX0Y=
|
||||
github.com/prometheus/common/assets v0.2.0 h1:0P5OrzoHrYBOSM1OigWL3mY8ZvV2N4zIE/5AahrSrfM=
|
||||
github.com/prometheus/common/assets v0.2.0/go.mod h1:D17UVUE12bHbim7HzwUvtqm6gwBEaDQ0F+hIGbFbccI=
|
||||
github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4=
|
||||
github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI=
|
||||
github.com/prometheus/exporter-toolkit v0.7.1/go.mod h1:ZUBIj498ePooX9t/2xtDjeQYwvRpiPP2lh5u4iblj2g=
|
||||
github.com/prometheus/exporter-toolkit v0.8.2 h1:sbJAfBXQFkG6sUkbwBun8MNdzW9+wd5YfPYofbmj0YM=
|
||||
github.com/prometheus/exporter-toolkit v0.8.2/go.mod h1:00shzmJL7KxcsabLWcONwpyNEuWhREOnFqZW7vadFS0=
|
||||
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
|
@ -697,7 +680,6 @@ github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsT
|
|||
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
|
||||
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
|
||||
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
|
||||
github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
|
||||
github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo=
|
||||
github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4=
|
||||
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
|
||||
|
@ -706,21 +688,18 @@ github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6L
|
|||
github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||
github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||
github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8=
|
||||
github.com/rs/cors v1.8.2/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU=
|
||||
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
|
||||
github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
|
||||
github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E=
|
||||
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.10 h1:wsfMs0iv+MJiViM37qh5VEKISi3/ZUq2nNKNdqmumAs=
|
||||
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.10/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg=
|
||||
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.12 h1:Aaz4T7dZp7cB2cv7D/tGtRdSMh48sRaDYr7Jh0HV4qQ=
|
||||
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.12/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg=
|
||||
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I=
|
||||
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
|
||||
github.com/shoenig/test v0.4.6 h1:S1pAVs5L1TSRen3N1YQNtBZIh9Z6d1PyQSUDUweMTqk=
|
||||
github.com/shoenig/test v0.6.0 h1:rU0ymLHmCRqz14gABce/DzYryKU+uaWqobCBvAY6DtU=
|
||||
github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749 h1:bUGsEnyNbVPw06Bs80sCeARAlK8lhwqGyi6UT8ymuGk=
|
||||
github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg=
|
||||
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
|
||||
github.com/shurcooL/vfsgen v0.0.0-20200824052919-0d455de96546/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw=
|
||||
github.com/simonpasquier/klog-gokit v0.3.0 h1:TkFK21cbwDRS+CiystjqbAiq5ubJcVTk9hLUck5Ntcs=
|
||||
github.com/simonpasquier/klog-gokit v0.3.0/go.mod h1:+SUlDQNrhVtGt2FieaqNftzzk8P72zpWlACateWxA9k=
|
||||
github.com/simonpasquier/klog-gokit/v3 v3.0.0 h1:J0QrVhAULISHWN05PeXX/xMqJBjnpl2fAuO8uHdQGsA=
|
||||
|
@ -740,7 +719,6 @@ github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJ
|
|||
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
|
||||
github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4=
|
||||
github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I=
|
||||
github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
|
||||
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
|
||||
github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||
|
@ -779,23 +757,20 @@ github.com/xdg-go/scram v1.1.1/go.mod h1:RaEWvsqvNKKvBPvcKeFjrG2cJqOkHTiyTpzz23n
|
|||
github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM=
|
||||
github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgklLGvcBnW8=
|
||||
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
|
||||
github.com/xlab/treeprint v1.1.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0=
|
||||
github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA=
|
||||
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
|
||||
github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
|
||||
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
|
||||
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
|
||||
go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg=
|
||||
go.mongodb.org/mongo-driver v1.7.3/go.mod h1:NqaYOwnXWr5Pm7AOpO5QFxKJ503nbMse/R79oO62zWg=
|
||||
go.mongodb.org/mongo-driver v1.7.5/go.mod h1:VXEWRZ6URJIkUq2SCAyapmhH0ZLRBP+FT4xhp5Zvxng=
|
||||
go.mongodb.org/mongo-driver v1.8.3/go.mod h1:0sQWfOeY63QTntERDJJ/0SuKK0T1uVSgKCuAROlKEPY=
|
||||
go.mongodb.org/mongo-driver v1.10.0/go.mod h1:wsihk0Kdgv8Kqu1Anit4sfK+22vSFbUrAVEYRhCXrA8=
|
||||
go.mongodb.org/mongo-driver v1.10.2 h1:4Wk3cnqOrQCn0P92L3/mmurMxzdvWWs5J9jinAVKD+k=
|
||||
go.mongodb.org/mongo-driver v1.10.2/go.mod h1:z4XpeoU6w+9Vht+jAFyLgVrD+jGSQQe0+CBWFHNiHt8=
|
||||
go.mongodb.org/mongo-driver v1.11.0 h1:FZKhBSTydeuffHj9CBjXlR8vQLee1cQyTWYPA6/tqiE=
|
||||
go.mongodb.org/mongo-driver v1.11.0/go.mod h1:s7p5vEtfbeR1gYi6pnj3c3/urpbLv2T5Sfd6Rp2HBB8=
|
||||
go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
|
||||
go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
|
||||
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
|
||||
|
@ -829,7 +804,6 @@ go.opentelemetry.io/proto/otlp v0.19.0 h1:IVN6GR+mhC4s5yfcTbmzHYODqvWAp3ZedA2SJP
|
|||
go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U=
|
||||
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||
go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
|
||||
go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
|
||||
go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ=
|
||||
go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
|
||||
go.uber.org/automaxprocs v1.5.1 h1:e1YG66Lrk73dn4qhg8WFSvhF0JuFQF0ERIp4rpuV8Qk=
|
||||
|
@ -853,8 +827,6 @@ golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3
|
|||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20201216223049-8b5274cf687f/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
|
||||
golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
|
@ -871,8 +843,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0
|
|||
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
|
||||
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
|
||||
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
|
||||
golang.org/x/exp v0.0.0-20221212164502-fae10dda9338 h1:OvjRkcNHnf6/W5FZXSxODbxwD+X7fspczG7Jn/xQVD4=
|
||||
golang.org/x/exp v0.0.0-20221212164502-fae10dda9338/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=
|
||||
golang.org/x/exp v0.0.0-20230124195608-d38c7dcee874 h1:kWC3b7j6Fu09SnEBr7P4PuQyM0R6sqyH9R+EjIvT1nQ=
|
||||
golang.org/x/exp v0.0.0-20230124195608-d38c7dcee874/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=
|
||||
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
|
||||
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
|
@ -897,7 +869,6 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
|||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
|
||||
golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
|
||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
||||
golang.org/x/mod v0.7.0 h1:LapD9S96VoQRhi/GrNTqeBJFrUjs5UHCAtTlgwA5oZA=
|
||||
golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||
|
@ -944,15 +915,12 @@ golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1
|
|||
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20211216030914-fe4d6282115f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
||||
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
||||
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||
golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
|
||||
golang.org/x/net v0.4.0 h1:Q5QPcMlvfxFTAPV0+07Xz/MpK9NTXu2VDUuy0FeMfaU=
|
||||
golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
|
||||
golang.org/x/net v0.5.0 h1:GyT4nK/YDHSqa1c4753ouYCDajOYKTja9Xb/OHtgvSw=
|
||||
golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
|
@ -960,8 +928,8 @@ golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4Iltr
|
|||
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.3.0 h1:6l90koy8/LaBLmLu8jpHeHexzMwEita0zFfYlggy2F8=
|
||||
golang.org/x/oauth2 v0.3.0/go.mod h1:rQrIauxkUhJ6CuwEXwymO2/eh4xz2ZWF1nBkcxS+tGk=
|
||||
golang.org/x/oauth2 v0.4.0 h1:NF0gk8LVPg1Ml7SSbGyySuoxdsXitj7TvgvuRxIMc/M=
|
||||
golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
|
@ -1027,7 +995,6 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w
|
|||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210420072515-93ed5bcd2bfe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
|
@ -1038,23 +1005,19 @@ golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBc
|
|||
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.3.0 h1:w8ZOecv6NaNa/zC8944JTU3vz4u6Lagfk4RPQxv92NQ=
|
||||
golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
|
||||
golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18=
|
||||
golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.3.0 h1:qoo4akIqOcDME5bhc/NgxUdovd6BSS2uMsVjB56q1xI=
|
||||
golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA=
|
||||
golang.org/x/term v0.4.0 h1:O7UWfv5+A2qiuulQk30kVinPoMtoIPeVaKLEgLpVkvg=
|
||||
golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ=
|
||||
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
|
@ -1064,8 +1027,8 @@ golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
|||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/text v0.5.0 h1:OLmvp0KP+FVG99Ct/qFiL/Fhk4zp4QQnZ7b2U+5piUM=
|
||||
golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/text v0.6.0 h1:3XmdazWV+ubf7QgHSTWeykHOci5oeekaGJBLkrkaw4k=
|
||||
golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
|
@ -1126,15 +1089,13 @@ golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc
|
|||
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||
golang.org/x/tools v0.1.6-0.20210726203631-07bc1bf47fb2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||
golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU=
|
||||
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
|
||||
golang.org/x/tools v0.4.0 h1:7mTAgkunk3fr4GAloyyCasadO6h9zSsQZbwvcaIciV4=
|
||||
golang.org/x/tools v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ=
|
||||
golang.org/x/tools v0.5.0 h1:+bSpV5HIeWkuvgaMfI3UmKRThoTA5ODJTUd8T17NO+4=
|
||||
golang.org/x/tools v0.5.0/go.mod h1:N+Kgy78s5I24c24dU8OfWNEotWjutIs8SnJvn5IDq+k=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk=
|
||||
google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk=
|
||||
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
|
||||
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
|
||||
|
@ -1150,8 +1111,8 @@ google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/
|
|||
google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||
google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
|
||||
google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
|
||||
google.golang.org/api v0.104.0 h1:KBfmLRqdZEbwQleFlSLnzpQJwhjpmNOk4cKQIBDZ9mg=
|
||||
google.golang.org/api v0.104.0/go.mod h1:JCspTXJbBxa5ySXw4UgUqVer7DfVxbvc/CTUFqAED5U=
|
||||
google.golang.org/api v0.108.0 h1:WVBc/faN0DkKtR43Q/7+tPny9ZoLZdIiAyG5Q9vFClg=
|
||||
google.golang.org/api v0.108.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
|
@ -1193,8 +1154,8 @@ google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6D
|
|||
google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
|
||||
google.golang.org/genproto v0.0.0-20220329172620-7be39ac1afc7/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
|
||||
google.golang.org/genproto v0.0.0-20221207170731-23e4bf6bdc37 h1:jmIfw8+gSvXcZSgaFAGyInDXeWzUhvYH57G/5GKMn70=
|
||||
google.golang.org/genproto v0.0.0-20221207170731-23e4bf6bdc37/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM=
|
||||
google.golang.org/genproto v0.0.0-20230124163310-31e0e69b6fc2 h1:O97sLx/Xmb/KIZHB/2/BzofxBs5QmmR0LcihPtllmbc=
|
||||
google.golang.org/genproto v0.0.0-20230124163310-31e0e69b6fc2/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM=
|
||||
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM=
|
||||
|
@ -1217,8 +1178,8 @@ google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAG
|
|||
google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
|
||||
google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
|
||||
google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ=
|
||||
google.golang.org/grpc v1.51.0 h1:E1eGv1FTqoLIdnBCZufiSHgKjlqG6fKFf6pPWtMTh8U=
|
||||
google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww=
|
||||
google.golang.org/grpc v1.52.1 h1:2NpOPk5g5Xtb0qebIEs7hNIa++PdtZLo2AQUpc1YnSU=
|
||||
google.golang.org/grpc v1.52.1/go.mod h1:pu6fVzoFb+NBYNAvQL08ic+lvB2IojljRYuun5vorUY=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
||||
|
@ -1252,7 +1213,6 @@ gopkg.in/ini.v1 v1.57.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
|||
gopkg.in/ini.v1 v1.66.6 h1:LATuAqN/shcYAOkv3wl2L4rkaKqkcgTBQjOyYDvcPKI=
|
||||
gopkg.in/ini.v1 v1.66.6/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
||||
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
|
||||
gopkg.in/telebot.v3 v3.0.0/go.mod h1:7rExV8/0mDDNu9epSrDm/8j22KLaActH1Tbee6YjzWg=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
|
||||
gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI=
|
||||
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
|
||||
|
@ -1282,12 +1242,12 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh
|
|||
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
|
||||
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
|
||||
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
|
||||
k8s.io/api v0.26.0 h1:IpPlZnxBpV1xl7TGk/X6lFtpgjgntCg8PJ+qrPHAC7I=
|
||||
k8s.io/api v0.26.0/go.mod h1:k6HDTaIFC8yn1i6pSClSqIwLABIcLV9l5Q4EcngKnQg=
|
||||
k8s.io/apimachinery v0.26.0 h1:1feANjElT7MvPqp0JT6F3Ss6TWDwmcjLypwoPpEf7zg=
|
||||
k8s.io/apimachinery v0.26.0/go.mod h1:tnPmbONNJ7ByJNz9+n9kMjNP8ON+1qoAIIC70lztu74=
|
||||
k8s.io/client-go v0.26.0 h1:lT1D3OfO+wIi9UFolCrifbjUUgu7CpLca0AD8ghRLI8=
|
||||
k8s.io/client-go v0.26.0/go.mod h1:I2Sh57A79EQsDmn7F7ASpmru1cceh3ocVT9KlX2jEZg=
|
||||
k8s.io/api v0.26.1 h1:f+SWYiPd/GsiWwVRz+NbFyCgvv75Pk9NK6dlkZgpCRQ=
|
||||
k8s.io/api v0.26.1/go.mod h1:xd/GBNgR0f707+ATNyPmQ1oyKSgndzXij81FzWGsejg=
|
||||
k8s.io/apimachinery v0.26.1 h1:8EZ/eGJL+hY/MYCNwhmDzVqq2lPl3N3Bo8rvweJwXUQ=
|
||||
k8s.io/apimachinery v0.26.1/go.mod h1:tnPmbONNJ7ByJNz9+n9kMjNP8ON+1qoAIIC70lztu74=
|
||||
k8s.io/client-go v0.26.1 h1:87CXzYJnAMGaa/IDDfRdhTzxk/wzGZ+/HUQpqgVSZXU=
|
||||
k8s.io/client-go v0.26.1/go.mod h1:IWNSglg+rQ3OcvDkhY6+QLeasV4OYHDjdqeWkDQZwGE=
|
||||
k8s.io/kube-openapi v0.0.0-20221207184640-f3cff1453715 h1:tBEbstoM+K0FiBV5KGAKQ0kuvf54v/hwpldiJt69w1s=
|
||||
k8s.io/kube-openapi v0.0.0-20221207184640-f3cff1453715/go.mod h1:+Axhij7bCpeqhklhUTe3xmOn6bWxolyZEeyaFpjGtl4=
|
||||
k8s.io/utils v0.0.0-20221128185143-99ec85e7a448 h1:KTgPnR10d5zhztWptI952TNtt/4u5h3IzDXkdIMuo2Y=
|
||||
|
|
|
@ -143,22 +143,24 @@ type RuleGroup struct {
|
|||
|
||||
// Rule describes an alerting or recording rule.
|
||||
type Rule struct {
|
||||
Record string `yaml:"record,omitempty"`
|
||||
Alert string `yaml:"alert,omitempty"`
|
||||
Expr string `yaml:"expr"`
|
||||
For model.Duration `yaml:"for,omitempty"`
|
||||
Labels map[string]string `yaml:"labels,omitempty"`
|
||||
Annotations map[string]string `yaml:"annotations,omitempty"`
|
||||
Record string `yaml:"record,omitempty"`
|
||||
Alert string `yaml:"alert,omitempty"`
|
||||
Expr string `yaml:"expr"`
|
||||
For model.Duration `yaml:"for,omitempty"`
|
||||
KeepFiringFor model.Duration `yaml:"keep_firing_for,omitempty"`
|
||||
Labels map[string]string `yaml:"labels,omitempty"`
|
||||
Annotations map[string]string `yaml:"annotations,omitempty"`
|
||||
}
|
||||
|
||||
// RuleNode adds yaml.v3 layer to support line and column outputs for invalid rules.
|
||||
type RuleNode struct {
|
||||
Record yaml.Node `yaml:"record,omitempty"`
|
||||
Alert yaml.Node `yaml:"alert,omitempty"`
|
||||
Expr yaml.Node `yaml:"expr"`
|
||||
For model.Duration `yaml:"for,omitempty"`
|
||||
Labels map[string]string `yaml:"labels,omitempty"`
|
||||
Annotations map[string]string `yaml:"annotations,omitempty"`
|
||||
Record yaml.Node `yaml:"record,omitempty"`
|
||||
Alert yaml.Node `yaml:"alert,omitempty"`
|
||||
Expr yaml.Node `yaml:"expr"`
|
||||
For model.Duration `yaml:"for,omitempty"`
|
||||
KeepFiringFor model.Duration `yaml:"keep_firing_for,omitempty"`
|
||||
Labels map[string]string `yaml:"labels,omitempty"`
|
||||
Annotations map[string]string `yaml:"annotations,omitempty"`
|
||||
}
|
||||
|
||||
// Validate the rule and return a list of encountered errors.
|
||||
|
@ -208,6 +210,12 @@ func (r *RuleNode) Validate() (nodes []WrappedError) {
|
|||
node: &r.Record,
|
||||
})
|
||||
}
|
||||
if r.KeepFiringFor != 0 {
|
||||
nodes = append(nodes, WrappedError{
|
||||
err: fmt.Errorf("invalid field 'keep_firing_for' in recording rule"),
|
||||
node: &r.Record,
|
||||
})
|
||||
}
|
||||
if !model.IsValidMetricName(model.LabelValue(r.Record.Value)) {
|
||||
nodes = append(nodes, WrappedError{
|
||||
err: fmt.Errorf("invalid recording rule name: %s", r.Record.Value),
|
||||
|
|
|
@ -73,12 +73,21 @@ func TestParseFileFailure(t *testing.T) {
|
|||
filename: "invalid_label_name.bad.yaml",
|
||||
errMsg: "invalid label name",
|
||||
},
|
||||
{
|
||||
filename: "record_and_for.bad.yaml",
|
||||
errMsg: "invalid field 'for' in recording rule",
|
||||
},
|
||||
{
|
||||
filename: "record_and_keep_firing_for.bad.yaml",
|
||||
errMsg: "invalid field 'keep_firing_for' in recording rule",
|
||||
},
|
||||
}
|
||||
|
||||
for _, c := range table {
|
||||
_, errs := ParseFile(filepath.Join("testdata", c.filename))
|
||||
require.NotNil(t, errs, "Expected error parsing %s but got none", c.filename)
|
||||
require.Error(t, errs[0], c.errMsg, "Expected error for %s.", c.filename)
|
||||
require.Error(t, errs[0])
|
||||
require.Containsf(t, errs[0].Error(), c.errMsg, "Expected error for %s.", c.filename)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
6
model/rulefmt/testdata/record_and_for.bad.yaml
vendored
Normal file
6
model/rulefmt/testdata/record_and_for.bad.yaml
vendored
Normal file
|
@ -0,0 +1,6 @@
|
|||
groups:
|
||||
- name: yolo
|
||||
rules:
|
||||
- record: Hello
|
||||
expr: 1
|
||||
for: 1m
|
6
model/rulefmt/testdata/record_and_keep_firing_for.bad.yaml
vendored
Normal file
6
model/rulefmt/testdata/record_and_keep_firing_for.bad.yaml
vendored
Normal file
|
@ -0,0 +1,6 @@
|
|||
groups:
|
||||
- name: yolo
|
||||
rules:
|
||||
- record: Hello
|
||||
expr: 1
|
||||
keep_firing_for: 1m
|
|
@ -10,6 +10,7 @@ import (
|
|||
math "math"
|
||||
math_bits "math/bits"
|
||||
|
||||
_ "github.com/gogo/protobuf/gogoproto"
|
||||
proto "github.com/gogo/protobuf/proto"
|
||||
types "github.com/gogo/protobuf/types"
|
||||
)
|
||||
|
@ -281,12 +282,12 @@ func (m *Quantile) GetValue() float64 {
|
|||
}
|
||||
|
||||
type Summary struct {
|
||||
SampleCount uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount,proto3" json:"sample_count,omitempty"`
|
||||
SampleSum float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum,proto3" json:"sample_sum,omitempty"`
|
||||
Quantile []*Quantile `protobuf:"bytes,3,rep,name=quantile,proto3" json:"quantile,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
SampleCount uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount,proto3" json:"sample_count,omitempty"`
|
||||
SampleSum float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum,proto3" json:"sample_sum,omitempty"`
|
||||
Quantile []Quantile `protobuf:"bytes,3,rep,name=quantile,proto3" json:"quantile"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *Summary) Reset() { *m = Summary{} }
|
||||
|
@ -336,7 +337,7 @@ func (m *Summary) GetSampleSum() float64 {
|
|||
return 0
|
||||
}
|
||||
|
||||
func (m *Summary) GetQuantile() []*Quantile {
|
||||
func (m *Summary) GetQuantile() []Quantile {
|
||||
if m != nil {
|
||||
return m.Quantile
|
||||
}
|
||||
|
@ -395,7 +396,7 @@ type Histogram struct {
|
|||
SampleCountFloat float64 `protobuf:"fixed64,4,opt,name=sample_count_float,json=sampleCountFloat,proto3" json:"sample_count_float,omitempty"`
|
||||
SampleSum float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum,proto3" json:"sample_sum,omitempty"`
|
||||
// Buckets for the conventional histogram.
|
||||
Bucket []*Bucket `protobuf:"bytes,3,rep,name=bucket,proto3" json:"bucket,omitempty"`
|
||||
Bucket []Bucket `protobuf:"bytes,3,rep,name=bucket,proto3" json:"bucket"`
|
||||
// schema defines the bucket schema. Currently, valid numbers are -4 <= n <= 8.
|
||||
// They are all for base-2 bucket schemas, where 1 is a bucket boundary in each case, and
|
||||
// then each power of two is divided into 2^n logarithmic buckets.
|
||||
|
@ -406,14 +407,14 @@ type Histogram struct {
|
|||
ZeroCount uint64 `protobuf:"varint,7,opt,name=zero_count,json=zeroCount,proto3" json:"zero_count,omitempty"`
|
||||
ZeroCountFloat float64 `protobuf:"fixed64,8,opt,name=zero_count_float,json=zeroCountFloat,proto3" json:"zero_count_float,omitempty"`
|
||||
// Negative buckets for the native histogram.
|
||||
NegativeSpan []*BucketSpan `protobuf:"bytes,9,rep,name=negative_span,json=negativeSpan,proto3" json:"negative_span,omitempty"`
|
||||
NegativeSpan []BucketSpan `protobuf:"bytes,9,rep,name=negative_span,json=negativeSpan,proto3" json:"negative_span"`
|
||||
// Use either "negative_delta" or "negative_count", the former for
|
||||
// regular histograms with integer counts, the latter for float
|
||||
// histograms.
|
||||
NegativeDelta []int64 `protobuf:"zigzag64,10,rep,packed,name=negative_delta,json=negativeDelta,proto3" json:"negative_delta,omitempty"`
|
||||
NegativeCount []float64 `protobuf:"fixed64,11,rep,packed,name=negative_count,json=negativeCount,proto3" json:"negative_count,omitempty"`
|
||||
// Positive buckets for the native histogram.
|
||||
PositiveSpan []*BucketSpan `protobuf:"bytes,12,rep,name=positive_span,json=positiveSpan,proto3" json:"positive_span,omitempty"`
|
||||
PositiveSpan []BucketSpan `protobuf:"bytes,12,rep,name=positive_span,json=positiveSpan,proto3" json:"positive_span"`
|
||||
// Use either "positive_delta" or "positive_count", the former for
|
||||
// regular histograms with integer counts, the latter for float
|
||||
// histograms.
|
||||
|
@ -478,7 +479,7 @@ func (m *Histogram) GetSampleSum() float64 {
|
|||
return 0
|
||||
}
|
||||
|
||||
func (m *Histogram) GetBucket() []*Bucket {
|
||||
func (m *Histogram) GetBucket() []Bucket {
|
||||
if m != nil {
|
||||
return m.Bucket
|
||||
}
|
||||
|
@ -513,7 +514,7 @@ func (m *Histogram) GetZeroCountFloat() float64 {
|
|||
return 0
|
||||
}
|
||||
|
||||
func (m *Histogram) GetNegativeSpan() []*BucketSpan {
|
||||
func (m *Histogram) GetNegativeSpan() []BucketSpan {
|
||||
if m != nil {
|
||||
return m.NegativeSpan
|
||||
}
|
||||
|
@ -534,7 +535,7 @@ func (m *Histogram) GetNegativeCount() []float64 {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (m *Histogram) GetPositiveSpan() []*BucketSpan {
|
||||
func (m *Histogram) GetPositiveSpan() []BucketSpan {
|
||||
if m != nil {
|
||||
return m.PositiveSpan
|
||||
}
|
||||
|
@ -688,7 +689,7 @@ func (m *BucketSpan) GetLength() uint32 {
|
|||
}
|
||||
|
||||
type Exemplar struct {
|
||||
Label []*LabelPair `protobuf:"bytes,1,rep,name=label,proto3" json:"label,omitempty"`
|
||||
Label []LabelPair `protobuf:"bytes,1,rep,name=label,proto3" json:"label"`
|
||||
Value float64 `protobuf:"fixed64,2,opt,name=value,proto3" json:"value,omitempty"`
|
||||
Timestamp *types.Timestamp `protobuf:"bytes,3,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
|
@ -729,7 +730,7 @@ func (m *Exemplar) XXX_DiscardUnknown() {
|
|||
|
||||
var xxx_messageInfo_Exemplar proto.InternalMessageInfo
|
||||
|
||||
func (m *Exemplar) GetLabel() []*LabelPair {
|
||||
func (m *Exemplar) GetLabel() []LabelPair {
|
||||
if m != nil {
|
||||
return m.Label
|
||||
}
|
||||
|
@ -751,16 +752,16 @@ func (m *Exemplar) GetTimestamp() *types.Timestamp {
|
|||
}
|
||||
|
||||
type Metric struct {
|
||||
Label []*LabelPair `protobuf:"bytes,1,rep,name=label,proto3" json:"label,omitempty"`
|
||||
Gauge *Gauge `protobuf:"bytes,2,opt,name=gauge,proto3" json:"gauge,omitempty"`
|
||||
Counter *Counter `protobuf:"bytes,3,opt,name=counter,proto3" json:"counter,omitempty"`
|
||||
Summary *Summary `protobuf:"bytes,4,opt,name=summary,proto3" json:"summary,omitempty"`
|
||||
Untyped *Untyped `protobuf:"bytes,5,opt,name=untyped,proto3" json:"untyped,omitempty"`
|
||||
Histogram *Histogram `protobuf:"bytes,7,opt,name=histogram,proto3" json:"histogram,omitempty"`
|
||||
TimestampMs int64 `protobuf:"varint,6,opt,name=timestamp_ms,json=timestampMs,proto3" json:"timestamp_ms,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
Label []LabelPair `protobuf:"bytes,1,rep,name=label,proto3" json:"label"`
|
||||
Gauge *Gauge `protobuf:"bytes,2,opt,name=gauge,proto3" json:"gauge,omitempty"`
|
||||
Counter *Counter `protobuf:"bytes,3,opt,name=counter,proto3" json:"counter,omitempty"`
|
||||
Summary *Summary `protobuf:"bytes,4,opt,name=summary,proto3" json:"summary,omitempty"`
|
||||
Untyped *Untyped `protobuf:"bytes,5,opt,name=untyped,proto3" json:"untyped,omitempty"`
|
||||
Histogram *Histogram `protobuf:"bytes,7,opt,name=histogram,proto3" json:"histogram,omitempty"`
|
||||
TimestampMs int64 `protobuf:"varint,6,opt,name=timestamp_ms,json=timestampMs,proto3" json:"timestamp_ms,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *Metric) Reset() { *m = Metric{} }
|
||||
|
@ -796,7 +797,7 @@ func (m *Metric) XXX_DiscardUnknown() {
|
|||
|
||||
var xxx_messageInfo_Metric proto.InternalMessageInfo
|
||||
|
||||
func (m *Metric) GetLabel() []*LabelPair {
|
||||
func (m *Metric) GetLabel() []LabelPair {
|
||||
if m != nil {
|
||||
return m.Label
|
||||
}
|
||||
|
@ -849,7 +850,7 @@ type MetricFamily struct {
|
|||
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
|
||||
Help string `protobuf:"bytes,2,opt,name=help,proto3" json:"help,omitempty"`
|
||||
Type MetricType `protobuf:"varint,3,opt,name=type,proto3,enum=io.prometheus.client.MetricType" json:"type,omitempty"`
|
||||
Metric []*Metric `protobuf:"bytes,4,rep,name=metric,proto3" json:"metric,omitempty"`
|
||||
Metric []Metric `protobuf:"bytes,4,rep,name=metric,proto3" json:"metric"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
|
@ -909,7 +910,7 @@ func (m *MetricFamily) GetType() MetricType {
|
|||
return MetricType_COUNTER
|
||||
}
|
||||
|
||||
func (m *MetricFamily) GetMetric() []*Metric {
|
||||
func (m *MetricFamily) GetMetric() []Metric {
|
||||
if m != nil {
|
||||
return m.Metric
|
||||
}
|
||||
|
@ -937,63 +938,65 @@ func init() {
|
|||
}
|
||||
|
||||
var fileDescriptor_d1e5ddb18987a258 = []byte{
|
||||
// 894 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x55, 0xdd, 0x6e, 0xe3, 0x44,
|
||||
0x18, 0xc5, 0xcd, 0xaf, 0xbf, 0x34, 0xdd, 0xec, 0x50, 0xad, 0xac, 0x42, 0xdb, 0x60, 0x09, 0xa9,
|
||||
0x20, 0xe4, 0x08, 0xe8, 0x0a, 0x84, 0xe0, 0xa2, 0xdd, 0xcd, 0x76, 0x91, 0xc8, 0xee, 0x32, 0x49,
|
||||
0x2e, 0x16, 0x2e, 0xac, 0x49, 0x3a, 0x4d, 0x2c, 0x3c, 0x1e, 0x63, 0x8f, 0x57, 0x94, 0x17, 0xe0,
|
||||
0x9a, 0x57, 0xe0, 0x61, 0x10, 0x97, 0x3c, 0x02, 0x2a, 0x0f, 0x02, 0x9a, 0x3f, 0xbb, 0x59, 0x39,
|
||||
0xcb, 0xb2, 0x77, 0x99, 0xe3, 0x73, 0xbe, 0x39, 0x67, 0x3c, 0x39, 0x06, 0x3f, 0xe2, 0xa3, 0x34,
|
||||
0xe3, 0x8c, 0x8a, 0x35, 0x2d, 0xf2, 0xd1, 0x32, 0x8e, 0x68, 0x22, 0x46, 0x8c, 0x8a, 0x2c, 0x5a,
|
||||
0xe6, 0x41, 0x9a, 0x71, 0xc1, 0xd1, 0x7e, 0xc4, 0x83, 0x8a, 0x13, 0x68, 0xce, 0xc1, 0xf1, 0x8a,
|
||||
0xf3, 0x55, 0x4c, 0x47, 0x8a, 0xb3, 0x28, 0xae, 0x46, 0x22, 0x62, 0x34, 0x17, 0x84, 0xa5, 0x5a,
|
||||
0xe6, 0xdf, 0x07, 0xf7, 0x1b, 0xb2, 0xa0, 0xf1, 0x33, 0x12, 0x65, 0x08, 0x41, 0x33, 0x21, 0x8c,
|
||||
0x7a, 0xce, 0xd0, 0x39, 0x71, 0xb1, 0xfa, 0x8d, 0xf6, 0xa1, 0xf5, 0x82, 0xc4, 0x05, 0xf5, 0x76,
|
||||
0x14, 0xa8, 0x17, 0xfe, 0x21, 0xb4, 0x2e, 0x48, 0xb1, 0xba, 0xf5, 0x58, 0x6a, 0x1c, 0xfb, 0xf8,
|
||||
0x7b, 0xe8, 0x3c, 0xe0, 0x45, 0x22, 0x68, 0x56, 0x4f, 0x40, 0x5f, 0x40, 0x97, 0xfe, 0x44, 0x59,
|
||||
0x1a, 0x93, 0x4c, 0x0d, 0xee, 0x7d, 0x72, 0x14, 0xd4, 0x05, 0x08, 0xc6, 0x86, 0x85, 0x4b, 0xbe,
|
||||
0xff, 0x25, 0x74, 0xbf, 0x2d, 0x48, 0x22, 0xa2, 0x98, 0xa2, 0x03, 0xe8, 0xfe, 0x68, 0x7e, 0x9b,
|
||||
0x0d, 0xca, 0xf5, 0xa6, 0xf3, 0xd2, 0xda, 0x2f, 0x0e, 0x74, 0xa6, 0x05, 0x63, 0x24, 0xbb, 0x46,
|
||||
0xef, 0xc1, 0x6e, 0x4e, 0x58, 0x1a, 0xd3, 0x70, 0x29, 0xdd, 0xaa, 0x09, 0x4d, 0xdc, 0xd3, 0x98,
|
||||
0x0a, 0x80, 0x0e, 0x01, 0x0c, 0x25, 0x2f, 0x98, 0x99, 0xe4, 0x6a, 0x64, 0x5a, 0x30, 0x99, 0xa3,
|
||||
0xdc, 0xbf, 0x31, 0x6c, 0x6c, 0xcf, 0x61, 0x1d, 0x57, 0xfe, 0xfc, 0x63, 0xe8, 0xcc, 0x13, 0x71,
|
||||
0x9d, 0xd2, 0xcb, 0x2d, 0xa7, 0xf8, 0x77, 0x13, 0xdc, 0xc7, 0x51, 0x2e, 0xf8, 0x2a, 0x23, 0xec,
|
||||
0x75, 0xcc, 0x7e, 0x04, 0xe8, 0x36, 0x25, 0xbc, 0x8a, 0x39, 0x11, 0x5e, 0x53, 0xcd, 0x1c, 0xdc,
|
||||
0x22, 0x3e, 0x92, 0xf8, 0x7f, 0x45, 0x3b, 0x85, 0xf6, 0xa2, 0x58, 0xfe, 0x40, 0x85, 0x09, 0xf6,
|
||||
0x6e, 0x7d, 0xb0, 0x73, 0xc5, 0xc1, 0x86, 0x8b, 0xee, 0x41, 0x3b, 0x5f, 0xae, 0x29, 0x23, 0x5e,
|
||||
0x6b, 0xe8, 0x9c, 0xdc, 0xc5, 0x66, 0x85, 0xde, 0x87, 0xbd, 0x9f, 0x69, 0xc6, 0x43, 0xb1, 0xce,
|
||||
0x68, 0xbe, 0xe6, 0xf1, 0xa5, 0xd7, 0x56, 0x1b, 0xf6, 0x25, 0x3a, 0xb3, 0xa0, 0xf4, 0xa4, 0x68,
|
||||
0x3a, 0x62, 0x47, 0x45, 0x74, 0x25, 0xa2, 0x03, 0x9e, 0xc0, 0xa0, 0x7a, 0x6c, 0xe2, 0x75, 0xd5,
|
||||
0x9c, 0xbd, 0x92, 0xa4, 0xc3, 0x8d, 0xa1, 0x9f, 0xd0, 0x15, 0x11, 0xd1, 0x0b, 0x1a, 0xe6, 0x29,
|
||||
0x49, 0x3c, 0x57, 0x85, 0x18, 0xbe, 0x2a, 0xc4, 0x34, 0x25, 0x09, 0xde, 0xb5, 0x32, 0xb9, 0x92,
|
||||
0xb6, 0xcb, 0x31, 0x97, 0x34, 0x16, 0xc4, 0x83, 0x61, 0xe3, 0x04, 0xe1, 0x72, 0xf8, 0x43, 0x09,
|
||||
0x6e, 0xd0, 0xb4, 0xf5, 0xde, 0xb0, 0x21, 0xd3, 0x59, 0x54, 0xdb, 0x1f, 0x43, 0x3f, 0xe5, 0x79,
|
||||
0x54, 0x99, 0xda, 0x7d, 0x5d, 0x53, 0x56, 0x66, 0x4d, 0x95, 0x63, 0xb4, 0xa9, 0xbe, 0x36, 0x65,
|
||||
0xd1, 0xd2, 0x54, 0x49, 0xd3, 0xa6, 0xf6, 0xb4, 0x29, 0x8b, 0x2a, 0x53, 0xfe, 0xef, 0x0e, 0xb4,
|
||||
0xf5, 0x56, 0xe8, 0x03, 0x18, 0x2c, 0x0b, 0x56, 0xc4, 0xb7, 0x83, 0xe8, 0x6b, 0x76, 0xa7, 0xc2,
|
||||
0x75, 0x94, 0x53, 0xb8, 0xf7, 0x32, 0x75, 0xe3, 0xba, 0xed, 0xbf, 0x24, 0xd0, 0x6f, 0xe5, 0x18,
|
||||
0x7a, 0x45, 0x9a, 0xd2, 0x2c, 0x5c, 0xf0, 0x22, 0xb9, 0x34, 0x77, 0x0e, 0x14, 0x74, 0x2e, 0x91,
|
||||
0x8d, 0x5e, 0x68, 0xfc, 0xef, 0x5e, 0x80, 0xea, 0xc8, 0xe4, 0x45, 0xe4, 0x57, 0x57, 0x39, 0xd5,
|
||||
0x09, 0xee, 0x62, 0xb3, 0x92, 0x78, 0x4c, 0x93, 0x95, 0x58, 0xab, 0xdd, 0xfb, 0xd8, 0xac, 0xfc,
|
||||
0x5f, 0x1d, 0xe8, 0xda, 0xa1, 0xe8, 0x3e, 0xb4, 0x62, 0xd9, 0x8a, 0x9e, 0xa3, 0x5e, 0xd0, 0x71,
|
||||
0xbd, 0x87, 0xb2, 0x38, 0xb1, 0x66, 0xd7, 0x37, 0x0e, 0xfa, 0x1c, 0xdc, 0xb2, 0x75, 0x4d, 0xa8,
|
||||
0x83, 0x40, 0xf7, 0x72, 0x60, 0x7b, 0x39, 0x98, 0x59, 0x06, 0xae, 0xc8, 0xfe, 0x3f, 0x3b, 0xd0,
|
||||
0x9e, 0xa8, 0x96, 0x7f, 0x53, 0x47, 0x1f, 0x43, 0x6b, 0x25, 0x7b, 0xda, 0x94, 0xec, 0x3b, 0xf5,
|
||||
0x32, 0x55, 0xe5, 0x58, 0x33, 0xd1, 0x67, 0xd0, 0x59, 0xea, 0xee, 0x36, 0x66, 0x0f, 0xeb, 0x45,
|
||||
0xa6, 0xe0, 0xb1, 0x65, 0x4b, 0x61, 0xae, 0x8b, 0x55, 0xdd, 0x81, 0xad, 0x42, 0xd3, 0xbe, 0xd8,
|
||||
0xb2, 0xa5, 0xb0, 0xd0, 0x45, 0xa8, 0x4a, 0x63, 0xab, 0xd0, 0xb4, 0x25, 0xb6, 0x6c, 0xf4, 0x15,
|
||||
0xb8, 0x6b, 0xdb, 0x8f, 0xaa, 0x2c, 0xb6, 0x1e, 0x4c, 0x59, 0xa3, 0xb8, 0x52, 0xc8, 0x46, 0x2d,
|
||||
0xcf, 0x3a, 0x64, 0xb9, 0x6a, 0xa4, 0x06, 0xee, 0x95, 0xd8, 0x24, 0xf7, 0x7f, 0x73, 0x60, 0x57,
|
||||
0xbf, 0x81, 0x47, 0x84, 0x45, 0xf1, 0x75, 0xed, 0x27, 0x12, 0x41, 0x73, 0x4d, 0xe3, 0xd4, 0x7c,
|
||||
0x21, 0xd5, 0x6f, 0x74, 0x0a, 0x4d, 0xe9, 0x51, 0x1d, 0xe1, 0xde, 0xb6, 0x7f, 0xb8, 0x9e, 0x3c,
|
||||
0xbb, 0x4e, 0x29, 0x56, 0x6c, 0xd9, 0xb9, 0xfa, 0xab, 0xee, 0x35, 0x5f, 0xd5, 0xb9, 0x5a, 0x87,
|
||||
0x0d, 0xf7, 0xc3, 0x05, 0x40, 0x35, 0x09, 0xf5, 0xa0, 0xf3, 0xe0, 0xe9, 0xfc, 0xc9, 0x6c, 0x8c,
|
||||
0x07, 0x6f, 0x21, 0x17, 0x5a, 0x17, 0x67, 0xf3, 0x8b, 0xf1, 0xc0, 0x91, 0xf8, 0x74, 0x3e, 0x99,
|
||||
0x9c, 0xe1, 0xe7, 0x83, 0x1d, 0xb9, 0x98, 0x3f, 0x99, 0x3d, 0x7f, 0x36, 0x7e, 0x38, 0x68, 0xa0,
|
||||
0x3e, 0xb8, 0x8f, 0xbf, 0x9e, 0xce, 0x9e, 0x5e, 0xe0, 0xb3, 0xc9, 0xa0, 0x89, 0xde, 0x86, 0x3b,
|
||||
0x4a, 0x13, 0x56, 0x60, 0xeb, 0xdc, 0xff, 0xe3, 0xe6, 0xc8, 0xf9, 0xf3, 0xe6, 0xc8, 0xf9, 0xeb,
|
||||
0xe6, 0xc8, 0xf9, 0x6e, 0x3f, 0xe2, 0x61, 0x65, 0x2b, 0xd4, 0xb6, 0x16, 0x6d, 0x75, 0x9b, 0x3f,
|
||||
0xfd, 0x37, 0x00, 0x00, 0xff, 0xff, 0xf0, 0xbc, 0x25, 0x8b, 0xaf, 0x08, 0x00, 0x00,
|
||||
// 923 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0xdd, 0x8e, 0xdb, 0x44,
|
||||
0x18, 0xad, 0x1b, 0xe7, 0xc7, 0x5f, 0x36, 0xdb, 0x74, 0x88, 0x2a, 0x6b, 0x61, 0x37, 0xc1, 0x12,
|
||||
0xd2, 0x82, 0x50, 0x22, 0xa0, 0x08, 0x54, 0x40, 0x62, 0xb7, 0xdd, 0x6e, 0x51, 0x49, 0x5b, 0x26,
|
||||
0xc9, 0x45, 0xe1, 0xc2, 0x9a, 0x64, 0x67, 0x1d, 0x0b, 0xdb, 0x63, 0xec, 0x71, 0xc5, 0x72, 0xcf,
|
||||
0x25, 0xd7, 0xbc, 0x02, 0x4f, 0x82, 0x7a, 0xc9, 0x13, 0x20, 0xb4, 0xef, 0xc0, 0x3d, 0x9a, 0x3f,
|
||||
0x3b, 0x5b, 0x39, 0x85, 0x15, 0x77, 0x33, 0xc7, 0xe7, 0xfb, 0xe6, 0x9c, 0x99, 0xc9, 0x99, 0x80,
|
||||
0x17, 0xb2, 0x49, 0x9a, 0xb1, 0x98, 0xf2, 0x35, 0x2d, 0xf2, 0xc9, 0x2a, 0x0a, 0x69, 0xc2, 0x27,
|
||||
0x31, 0xe5, 0x59, 0xb8, 0xca, 0xc7, 0x69, 0xc6, 0x38, 0x43, 0x83, 0x90, 0x8d, 0x2b, 0xce, 0x58,
|
||||
0x71, 0xf6, 0x06, 0x01, 0x0b, 0x98, 0x24, 0x4c, 0xc4, 0x48, 0x71, 0xf7, 0x86, 0x01, 0x63, 0x41,
|
||||
0x44, 0x27, 0x72, 0xb6, 0x2c, 0xce, 0x27, 0x3c, 0x8c, 0x69, 0xce, 0x49, 0x9c, 0x2a, 0x82, 0xf7,
|
||||
0x31, 0x38, 0x5f, 0x93, 0x25, 0x8d, 0x9e, 0x91, 0x30, 0x43, 0x08, 0xec, 0x84, 0xc4, 0xd4, 0xb5,
|
||||
0x46, 0xd6, 0xa1, 0x83, 0xe5, 0x18, 0x0d, 0xa0, 0xf9, 0x82, 0x44, 0x05, 0x75, 0x6f, 0x4a, 0x50,
|
||||
0x4d, 0xbc, 0x7d, 0x68, 0x9e, 0x92, 0x22, 0xd8, 0xf8, 0x2c, 0x6a, 0x2c, 0xf3, 0xf9, 0x3b, 0x68,
|
||||
0xdf, 0x67, 0x45, 0xc2, 0x69, 0x56, 0x4f, 0x40, 0xf7, 0xa0, 0x43, 0x7f, 0xa4, 0x71, 0x1a, 0x91,
|
||||
0x4c, 0x36, 0xee, 0x7e, 0x78, 0x30, 0xae, 0xb3, 0x35, 0x3e, 0xd1, 0x2c, 0x5c, 0xf2, 0xbd, 0xcf,
|
||||
0xa1, 0xf3, 0x4d, 0x41, 0x12, 0x1e, 0x46, 0x14, 0xed, 0x41, 0xe7, 0x07, 0x3d, 0xd6, 0x0b, 0x94,
|
||||
0xf3, 0xab, 0xca, 0x4b, 0x69, 0xbf, 0x58, 0xd0, 0x9e, 0x15, 0x71, 0x4c, 0xb2, 0x0b, 0xf4, 0x36,
|
||||
0xec, 0xe4, 0x24, 0x4e, 0x23, 0xea, 0xaf, 0x84, 0x5a, 0xd9, 0xc1, 0xc6, 0x5d, 0x85, 0x49, 0x03,
|
||||
0x68, 0x1f, 0x40, 0x53, 0xf2, 0x22, 0xd6, 0x9d, 0x1c, 0x85, 0xcc, 0x8a, 0x18, 0x7d, 0xb9, 0xb1,
|
||||
0x7e, 0x63, 0xd4, 0xd8, 0xee, 0xc3, 0x28, 0x3e, 0xb6, 0x5f, 0xfe, 0x39, 0xbc, 0x51, 0xa9, 0xf4,
|
||||
0x86, 0xd0, 0x5e, 0x24, 0xfc, 0x22, 0xa5, 0x67, 0x5b, 0xf6, 0xf2, 0x6f, 0x1b, 0x9c, 0x47, 0x61,
|
||||
0xce, 0x59, 0x90, 0x91, 0xf8, 0xbf, 0x48, 0x7e, 0x1f, 0xd0, 0x26, 0xc5, 0x3f, 0x8f, 0x18, 0xe1,
|
||||
0xae, 0x2d, 0x7b, 0xf6, 0x37, 0x88, 0x0f, 0x05, 0xfe, 0x6f, 0x06, 0xef, 0x41, 0x6b, 0x59, 0xac,
|
||||
0xbe, 0xa7, 0x5c, 0xdb, 0x7b, 0xab, 0xde, 0xde, 0xb1, 0xe4, 0x68, 0x73, 0xba, 0x02, 0xdd, 0x81,
|
||||
0x56, 0xbe, 0x5a, 0xd3, 0x98, 0xb8, 0xcd, 0x91, 0x75, 0x78, 0x1b, 0xeb, 0x19, 0x7a, 0x07, 0x76,
|
||||
0x7f, 0xa2, 0x19, 0xf3, 0xf9, 0x3a, 0xa3, 0xf9, 0x9a, 0x45, 0x67, 0x6e, 0x4b, 0x2e, 0xdb, 0x13,
|
||||
0xe8, 0xdc, 0x80, 0x42, 0x99, 0xa4, 0x29, 0xa3, 0x6d, 0x69, 0xd4, 0x11, 0x88, 0xb2, 0x79, 0x08,
|
||||
0xfd, 0xea, 0xb3, 0x36, 0xd9, 0x91, 0x7d, 0x76, 0x4b, 0x92, 0xb2, 0xf8, 0x18, 0x7a, 0x09, 0x0d,
|
||||
0x08, 0x0f, 0x5f, 0x50, 0x3f, 0x4f, 0x49, 0xe2, 0x3a, 0xd2, 0xca, 0xe8, 0x75, 0x56, 0x66, 0x29,
|
||||
0x49, 0xb4, 0x9d, 0x1d, 0x53, 0x2c, 0x30, 0x21, 0xbe, 0x6c, 0x76, 0x46, 0x23, 0x4e, 0x5c, 0x18,
|
||||
0x35, 0x0e, 0x11, 0x2e, 0x97, 0x78, 0x20, 0xc0, 0x2b, 0x34, 0x65, 0xa0, 0x3b, 0x6a, 0x08, 0x8f,
|
||||
0x06, 0x55, 0x26, 0x1e, 0x43, 0x2f, 0x65, 0x79, 0x58, 0x49, 0xdb, 0xb9, 0x9e, 0x34, 0x53, 0x6c,
|
||||
0xa4, 0x95, 0xcd, 0x94, 0xb4, 0x9e, 0x92, 0x66, 0xd0, 0x52, 0x5a, 0x49, 0x53, 0xd2, 0x76, 0x95,
|
||||
0x34, 0x83, 0x4a, 0x69, 0xde, 0xef, 0x16, 0xb4, 0xd4, 0x82, 0xe8, 0x5d, 0xe8, 0xaf, 0x8a, 0xb8,
|
||||
0x88, 0x36, 0xed, 0xa8, 0x8b, 0x77, 0xab, 0xc2, 0x95, 0xa1, 0xbb, 0x70, 0xe7, 0x55, 0xea, 0x95,
|
||||
0x0b, 0x38, 0x78, 0xa5, 0x40, 0x9d, 0xd0, 0x10, 0xba, 0x45, 0x9a, 0xd2, 0xcc, 0x5f, 0xb2, 0x22,
|
||||
0x39, 0xd3, 0xb7, 0x10, 0x24, 0x74, 0x2c, 0x90, 0x2b, 0x79, 0xd1, 0xb8, 0x76, 0x5e, 0x40, 0xb5,
|
||||
0x71, 0xe2, 0x52, 0xb2, 0xf3, 0xf3, 0x9c, 0x2a, 0x07, 0xb7, 0xb1, 0x9e, 0x09, 0x3c, 0xa2, 0x49,
|
||||
0xc0, 0xd7, 0x72, 0xf5, 0x1e, 0xd6, 0x33, 0xef, 0x57, 0x0b, 0x3a, 0xa6, 0x29, 0xfa, 0x0c, 0x9a,
|
||||
0x91, 0x48, 0x4b, 0xd7, 0x92, 0xc7, 0x34, 0xac, 0xd7, 0x50, 0x06, 0xaa, 0x3e, 0x25, 0x55, 0x53,
|
||||
0x9f, 0x47, 0xe8, 0x53, 0x70, 0xca, 0x4c, 0xd6, 0xd6, 0xf6, 0xc6, 0x2a, 0xb5, 0xc7, 0x26, 0xb5,
|
||||
0xc7, 0x73, 0xc3, 0xc0, 0x15, 0xd9, 0xfb, 0xb9, 0x01, 0xad, 0xa9, 0x7c, 0x19, 0xfe, 0x9f, 0xae,
|
||||
0x0f, 0xa0, 0x19, 0x88, 0x2c, 0xd7, 0x41, 0xfc, 0x66, 0x7d, 0xb1, 0x8c, 0x7b, 0xac, 0x98, 0xe8,
|
||||
0x13, 0x68, 0xaf, 0x54, 0xbe, 0x6b, 0xc9, 0xfb, 0xf5, 0x45, 0xfa, 0x11, 0xc0, 0x86, 0x2d, 0x0a,
|
||||
0x73, 0x15, 0xbe, 0xf2, 0x3e, 0x6c, 0x2d, 0xd4, 0x09, 0x8d, 0x0d, 0x5b, 0x14, 0x16, 0x2a, 0x26,
|
||||
0x65, 0x98, 0x6c, 0x2d, 0xd4, 0x59, 0x8a, 0x0d, 0x1b, 0x7d, 0x01, 0xce, 0xda, 0xa4, 0xa7, 0x0c,
|
||||
0x91, 0xad, 0xdb, 0x53, 0x86, 0x2c, 0xae, 0x2a, 0x44, 0xde, 0x96, 0x3b, 0xee, 0xc7, 0xb9, 0x4c,
|
||||
0xaa, 0x06, 0xee, 0x96, 0xd8, 0x34, 0xf7, 0x7e, 0xb3, 0x60, 0x47, 0x9d, 0xc3, 0x43, 0x12, 0x87,
|
||||
0xd1, 0x45, 0xed, 0x33, 0x8a, 0xc0, 0x5e, 0xd3, 0x28, 0xd5, 0xaf, 0xa8, 0x1c, 0xa3, 0xbb, 0x60,
|
||||
0x0b, 0x8d, 0x72, 0x0b, 0x77, 0xb7, 0xfd, 0xe6, 0x55, 0xe7, 0xf9, 0x45, 0x4a, 0xb1, 0x64, 0x8b,
|
||||
0x44, 0x56, 0xff, 0x07, 0x5c, 0xfb, 0x75, 0x89, 0xac, 0xea, 0x4c, 0x22, 0xab, 0x8a, 0xf7, 0x96,
|
||||
0x00, 0x55, 0x3f, 0xd4, 0x85, 0xf6, 0xfd, 0xa7, 0x8b, 0x27, 0xf3, 0x13, 0xdc, 0xbf, 0x81, 0x1c,
|
||||
0x68, 0x9e, 0x1e, 0x2d, 0x4e, 0x4f, 0xfa, 0x96, 0xc0, 0x67, 0x8b, 0xe9, 0xf4, 0x08, 0x3f, 0xef,
|
||||
0xdf, 0x14, 0x93, 0xc5, 0x93, 0xf9, 0xf3, 0x67, 0x27, 0x0f, 0xfa, 0x0d, 0xd4, 0x03, 0xe7, 0xd1,
|
||||
0x57, 0xb3, 0xf9, 0xd3, 0x53, 0x7c, 0x34, 0xed, 0xdb, 0xe8, 0x0d, 0xb8, 0x25, 0x6b, 0xfc, 0x0a,
|
||||
0x6c, 0x1e, 0x7b, 0x2f, 0x2f, 0x0f, 0xac, 0x3f, 0x2e, 0x0f, 0xac, 0xbf, 0x2e, 0x0f, 0xac, 0x6f,
|
||||
0x07, 0x21, 0xf3, 0x2b, 0x71, 0xbe, 0x12, 0xb7, 0x6c, 0xc9, 0x9b, 0xfd, 0xd1, 0x3f, 0x01, 0x00,
|
||||
0x00, 0xff, 0xff, 0x52, 0x2d, 0xb5, 0x31, 0xef, 0x08, 0x00, 0x00,
|
||||
}
|
||||
|
||||
func (m *LabelPair) Marshal() (dAtA []byte, err error) {
|
||||
|
@ -2496,7 +2499,7 @@ func (m *Summary) Unmarshal(dAtA []byte) error {
|
|||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.Quantile = append(m.Quantile, &Quantile{})
|
||||
m.Quantile = append(m.Quantile, Quantile{})
|
||||
if err := m.Quantile[len(m.Quantile)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -2673,7 +2676,7 @@ func (m *Histogram) Unmarshal(dAtA []byte) error {
|
|||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.Bucket = append(m.Bucket, &Bucket{})
|
||||
m.Bucket = append(m.Bucket, Bucket{})
|
||||
if err := m.Bucket[len(m.Bucket)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -2780,7 +2783,7 @@ func (m *Histogram) Unmarshal(dAtA []byte) error {
|
|||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.NegativeSpan = append(m.NegativeSpan, &BucketSpan{})
|
||||
m.NegativeSpan = append(m.NegativeSpan, BucketSpan{})
|
||||
if err := m.NegativeSpan[len(m.NegativeSpan)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -2946,7 +2949,7 @@ func (m *Histogram) Unmarshal(dAtA []byte) error {
|
|||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.PositiveSpan = append(m.PositiveSpan, &BucketSpan{})
|
||||
m.PositiveSpan = append(m.PositiveSpan, BucketSpan{})
|
||||
if err := m.PositiveSpan[len(m.PositiveSpan)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -3382,7 +3385,7 @@ func (m *Exemplar) Unmarshal(dAtA []byte) error {
|
|||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.Label = append(m.Label, &LabelPair{})
|
||||
m.Label = append(m.Label, LabelPair{})
|
||||
if err := m.Label[len(m.Label)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -3514,7 +3517,7 @@ func (m *Metric) Unmarshal(dAtA []byte) error {
|
|||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.Label = append(m.Label, &LabelPair{})
|
||||
m.Label = append(m.Label, LabelPair{})
|
||||
if err := m.Label[len(m.Label)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -3881,7 +3884,7 @@ func (m *MetricFamily) Unmarshal(dAtA []byte) error {
|
|||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.Metric = append(m.Metric, &Metric{})
|
||||
m.Metric = append(m.Metric, Metric{})
|
||||
if err := m.Metric[len(m.Metric)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
|
@ -21,6 +21,7 @@ syntax = "proto3";
|
|||
package io.prometheus.client;
|
||||
option go_package = "io_prometheus_client";
|
||||
|
||||
import "gogoproto/gogo.proto";
|
||||
import "google/protobuf/timestamp.proto";
|
||||
|
||||
message LabelPair {
|
||||
|
@ -58,9 +59,9 @@ message Quantile {
|
|||
}
|
||||
|
||||
message Summary {
|
||||
uint64 sample_count = 1;
|
||||
double sample_sum = 2;
|
||||
repeated Quantile quantile = 3;
|
||||
uint64 sample_count = 1;
|
||||
double sample_sum = 2;
|
||||
repeated Quantile quantile = 3 [(gogoproto.nullable) = false];
|
||||
}
|
||||
|
||||
message Untyped {
|
||||
|
@ -72,7 +73,7 @@ message Histogram {
|
|||
double sample_count_float = 4; // Overrides sample_count if > 0.
|
||||
double sample_sum = 2;
|
||||
// Buckets for the conventional histogram.
|
||||
repeated Bucket bucket = 3; // Ordered in increasing order of upper_bound, +Inf bucket is optional.
|
||||
repeated Bucket bucket = 3 [(gogoproto.nullable) = false]; // Ordered in increasing order of upper_bound, +Inf bucket is optional.
|
||||
|
||||
// Everything below here is for native histograms (also known as sparse histograms).
|
||||
// Native histograms are an experimental feature without stability guarantees.
|
||||
|
@ -88,20 +89,20 @@ message Histogram {
|
|||
double zero_count_float = 8; // Overrides sb_zero_count if > 0.
|
||||
|
||||
// Negative buckets for the native histogram.
|
||||
repeated BucketSpan negative_span = 9;
|
||||
repeated BucketSpan negative_span = 9 [(gogoproto.nullable) = false];
|
||||
// Use either "negative_delta" or "negative_count", the former for
|
||||
// regular histograms with integer counts, the latter for float
|
||||
// histograms.
|
||||
repeated sint64 negative_delta = 10; // Count delta of each bucket compared to previous one (or to zero for 1st bucket).
|
||||
repeated double negative_count = 11; // Absolute count of each bucket.
|
||||
repeated sint64 negative_delta = 10; // Count delta of each bucket compared to previous one (or to zero for 1st bucket).
|
||||
repeated double negative_count = 11; // Absolute count of each bucket.
|
||||
|
||||
// Positive buckets for the native histogram.
|
||||
repeated BucketSpan positive_span = 12;
|
||||
repeated BucketSpan positive_span = 12 [(gogoproto.nullable) = false];
|
||||
// Use either "positive_delta" or "positive_count", the former for
|
||||
// regular histograms with integer counts, the latter for float
|
||||
// histograms.
|
||||
repeated sint64 positive_delta = 13; // Count delta of each bucket compared to previous one (or to zero for 1st bucket).
|
||||
repeated double positive_count = 14; // Absolute count of each bucket.
|
||||
repeated sint64 positive_delta = 13; // Count delta of each bucket compared to previous one (or to zero for 1st bucket).
|
||||
repeated double positive_count = 14; // Absolute count of each bucket.
|
||||
}
|
||||
|
||||
message Bucket {
|
||||
|
@ -123,24 +124,24 @@ message BucketSpan {
|
|||
}
|
||||
|
||||
message Exemplar {
|
||||
repeated LabelPair label = 1;
|
||||
repeated LabelPair label = 1 [(gogoproto.nullable) = false];
|
||||
double value = 2;
|
||||
google.protobuf.Timestamp timestamp = 3; // OpenMetrics-style.
|
||||
}
|
||||
|
||||
message Metric {
|
||||
repeated LabelPair label = 1;
|
||||
Gauge gauge = 2;
|
||||
Counter counter = 3;
|
||||
Summary summary = 4;
|
||||
Untyped untyped = 5;
|
||||
Histogram histogram = 7;
|
||||
int64 timestamp_ms = 6;
|
||||
repeated LabelPair label = 1 [(gogoproto.nullable) = false];
|
||||
Gauge gauge = 2;
|
||||
Counter counter = 3;
|
||||
Summary summary = 4;
|
||||
Untyped untyped = 5;
|
||||
Histogram histogram = 7;
|
||||
int64 timestamp_ms = 6;
|
||||
}
|
||||
|
||||
message MetricFamily {
|
||||
string name = 1;
|
||||
string help = 2;
|
||||
MetricType type = 3;
|
||||
repeated Metric metric = 4;
|
||||
repeated Metric metric = 4 [(gogoproto.nullable) = false];
|
||||
}
|
||||
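For readers skimming the proto changes above: the [(gogoproto.nullable) = false] option makes the gogo generator emit value slices instead of pointer slices for repeated message fields. A minimal sketch of the effect, assuming the generated types of this package are in scope (the helper name is illustrative):

// Default generated field:             Metric []*Metric  (elements may be nil)
// With [(gogoproto.nullable) = false]: Metric []Metric   (elements are values)
func addEmptyMetric(mf *MetricFamily) {
	// The slice holds values, so call sites append Metric{} rather than &Metric{}.
	mf.Metric = append(mf.Metric, Metric{})
}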
|
|
|
@ -383,14 +383,14 @@ type Histogram struct {
|
|||
// *Histogram_ZeroCountFloat
|
||||
ZeroCount isHistogram_ZeroCount `protobuf_oneof:"zero_count"`
|
||||
// Negative Buckets.
|
||||
NegativeSpans []*BucketSpan `protobuf:"bytes,8,rep,name=negative_spans,json=negativeSpans,proto3" json:"negative_spans,omitempty"`
|
||||
NegativeSpans []BucketSpan `protobuf:"bytes,8,rep,name=negative_spans,json=negativeSpans,proto3" json:"negative_spans"`
|
||||
// Use either "negative_deltas" or "negative_counts", the former for
|
||||
// regular histograms with integer counts, the latter for float
|
||||
// histograms.
|
||||
NegativeDeltas []int64 `protobuf:"zigzag64,9,rep,packed,name=negative_deltas,json=negativeDeltas,proto3" json:"negative_deltas,omitempty"`
|
||||
NegativeCounts []float64 `protobuf:"fixed64,10,rep,packed,name=negative_counts,json=negativeCounts,proto3" json:"negative_counts,omitempty"`
|
||||
// Positive Buckets.
|
||||
PositiveSpans []*BucketSpan `protobuf:"bytes,11,rep,name=positive_spans,json=positiveSpans,proto3" json:"positive_spans,omitempty"`
|
||||
PositiveSpans []BucketSpan `protobuf:"bytes,11,rep,name=positive_spans,json=positiveSpans,proto3" json:"positive_spans"`
|
||||
// Use either "positive_deltas" or "positive_counts", the former for
|
||||
// regular histograms with integer counts, the latter for float
|
||||
// histograms.
|
||||
|
@ -529,7 +529,7 @@ func (m *Histogram) GetZeroCountFloat() float64 {
|
|||
return 0
|
||||
}
|
||||
|
||||
func (m *Histogram) GetNegativeSpans() []*BucketSpan {
|
||||
func (m *Histogram) GetNegativeSpans() []BucketSpan {
|
||||
if m != nil {
|
||||
return m.NegativeSpans
|
||||
}
|
||||
|
@ -550,7 +550,7 @@ func (m *Histogram) GetNegativeCounts() []float64 {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (m *Histogram) GetPositiveSpans() []*BucketSpan {
|
||||
func (m *Histogram) GetPositiveSpans() []BucketSpan {
|
||||
if m != nil {
|
||||
return m.PositiveSpans
|
||||
}
|
||||
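Because the getters now return value slices, ranging over spans copies small structs instead of following pointers; a tiny illustrative loop, assuming h is a *Histogram from this package:

for _, span := range h.GetPositiveSpans() {
	// span is a BucketSpan value, not a *BucketSpan.
	_ = span.Offset
	_ = span.Length
}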
|
@ -1143,75 +1143,75 @@ func init() {
|
|||
func init() { proto.RegisterFile("types.proto", fileDescriptor_d938547f84707355) }
|
||||
|
||||
var fileDescriptor_d938547f84707355 = []byte{
|
||||
// 1075 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x56, 0xdd, 0x8e, 0xdb, 0x44,
|
||||
0x14, 0x5e, 0xdb, 0x89, 0x13, 0x9f, 0xfc, 0xd4, 0x3b, 0xda, 0x16, 0x53, 0xd1, 0x6d, 0xb0, 0x54,
|
||||
0x88, 0x10, 0xca, 0xaa, 0x85, 0x0b, 0x2a, 0x0a, 0xd2, 0x6e, 0xc9, 0xfe, 0x88, 0x26, 0x51, 0x27,
|
||||
0x59, 0x41, 0xb9, 0x89, 0x66, 0x93, 0xd9, 0xc4, 0xaa, 0xff, 0xf0, 0x4c, 0xaa, 0x0d, 0xef, 0xc1,
|
||||
0x1d, 0x2f, 0xc1, 0x3d, 0x12, 0xb7, 0xbd, 0xe4, 0x09, 0x10, 0xda, 0x2b, 0x1e, 0x03, 0xcd, 0xb1,
|
||||
0x1d, 0x3b, 0xdd, 0x82, 0x54, 0xee, 0xe6, 0x7c, 0xe7, 0x3b, 0x33, 0x9f, 0xe7, 0xfc, 0x8c, 0xa1,
|
||||
0x21, 0xd7, 0x31, 0x17, 0xbd, 0x38, 0x89, 0x64, 0x44, 0x20, 0x4e, 0xa2, 0x80, 0xcb, 0x25, 0x5f,
|
||||
0x89, 0xbb, 0x7b, 0x8b, 0x68, 0x11, 0x21, 0x7c, 0xa0, 0x56, 0x29, 0xc3, 0xfd, 0x45, 0x87, 0xf6,
|
||||
0x80, 0xcb, 0xc4, 0x9b, 0x0d, 0xb8, 0x64, 0x73, 0x26, 0x19, 0x79, 0x0c, 0x15, 0xb5, 0x87, 0xa3,
|
||||
0x75, 0xb4, 0x6e, 0xfb, 0xd1, 0x83, 0x5e, 0xb1, 0x47, 0x6f, 0x9b, 0x99, 0x99, 0x93, 0x75, 0xcc,
|
||||
0x29, 0x86, 0x90, 0x4f, 0x81, 0x04, 0x88, 0x4d, 0x2f, 0x59, 0xe0, 0xf9, 0xeb, 0x69, 0xc8, 0x02,
|
||||
0xee, 0xe8, 0x1d, 0xad, 0x6b, 0x51, 0x3b, 0xf5, 0x1c, 0xa3, 0x63, 0xc8, 0x02, 0x4e, 0x08, 0x54,
|
||||
0x96, 0xdc, 0x8f, 0x9d, 0x0a, 0xfa, 0x71, 0xad, 0xb0, 0x55, 0xe8, 0x49, 0xa7, 0x9a, 0x62, 0x6a,
|
||||
0xed, 0xae, 0x01, 0x8a, 0x93, 0x48, 0x03, 0x6a, 0xe7, 0xc3, 0x6f, 0x87, 0xa3, 0xef, 0x86, 0xf6,
|
||||
0x8e, 0x32, 0x9e, 0x8e, 0xce, 0x87, 0x93, 0x3e, 0xb5, 0x35, 0x62, 0x41, 0xf5, 0xe4, 0xf0, 0xfc,
|
||||
0xa4, 0x6f, 0xeb, 0xa4, 0x05, 0xd6, 0xe9, 0xd9, 0x78, 0x32, 0x3a, 0xa1, 0x87, 0x03, 0xdb, 0x20,
|
||||
0x04, 0xda, 0xe8, 0x29, 0xb0, 0x8a, 0x0a, 0x1d, 0x9f, 0x0f, 0x06, 0x87, 0xf4, 0x85, 0x5d, 0x25,
|
||||
0x75, 0xa8, 0x9c, 0x0d, 0x8f, 0x47, 0xb6, 0x49, 0x9a, 0x50, 0x1f, 0x4f, 0x0e, 0x27, 0xfd, 0x71,
|
||||
0x7f, 0x62, 0xd7, 0xdc, 0x27, 0x60, 0x8e, 0x59, 0x10, 0xfb, 0x9c, 0xec, 0x41, 0xf5, 0x15, 0xf3,
|
||||
0x57, 0xe9, 0xb5, 0x68, 0x34, 0x35, 0xc8, 0x07, 0x60, 0x49, 0x2f, 0xe0, 0x42, 0xb2, 0x20, 0xc6,
|
||||
0xef, 0x34, 0x68, 0x01, 0xb8, 0x11, 0xd4, 0xfb, 0x57, 0x3c, 0x88, 0x7d, 0x96, 0x90, 0x03, 0x30,
|
||||
0x7d, 0x76, 0xc1, 0x7d, 0xe1, 0x68, 0x1d, 0xa3, 0xdb, 0x78, 0xb4, 0x5b, 0xbe, 0xd7, 0x67, 0xca,
|
||||
0x73, 0x54, 0x79, 0xfd, 0xe7, 0xfd, 0x1d, 0x9a, 0xd1, 0x8a, 0x03, 0xf5, 0x7f, 0x3d, 0xd0, 0x78,
|
||||
0xf3, 0xc0, 0xdf, 0xab, 0x60, 0x9d, 0x7a, 0x42, 0x46, 0x8b, 0x84, 0x05, 0xe4, 0x1e, 0x58, 0xb3,
|
||||
0x68, 0x15, 0xca, 0xa9, 0x17, 0x4a, 0x94, 0x5d, 0x39, 0xdd, 0xa1, 0x75, 0x84, 0xce, 0x42, 0x49,
|
||||
0x3e, 0x84, 0x46, 0xea, 0xbe, 0xf4, 0x23, 0x26, 0xd3, 0x63, 0x4e, 0x77, 0x28, 0x20, 0x78, 0xac,
|
||||
0x30, 0x62, 0x83, 0x21, 0x56, 0x01, 0x9e, 0xa3, 0x51, 0xb5, 0x24, 0x77, 0xc0, 0x14, 0xb3, 0x25,
|
||||
0x0f, 0x18, 0x66, 0x6d, 0x97, 0x66, 0x16, 0x79, 0x00, 0xed, 0x9f, 0x78, 0x12, 0x4d, 0xe5, 0x32,
|
||||
0xe1, 0x62, 0x19, 0xf9, 0x73, 0xcc, 0xa0, 0x46, 0x5b, 0x0a, 0x9d, 0xe4, 0x20, 0xf9, 0x28, 0xa3,
|
||||
0x15, 0xba, 0x4c, 0xd4, 0xa5, 0xd1, 0xa6, 0xc2, 0x9f, 0xe6, 0xda, 0x3e, 0x01, 0xbb, 0xc4, 0x4b,
|
||||
0x05, 0xd6, 0x50, 0xa0, 0x46, 0xdb, 0x1b, 0x66, 0x2a, 0xf2, 0x2b, 0x68, 0x87, 0x7c, 0xc1, 0xa4,
|
||||
0xf7, 0x8a, 0x4f, 0x45, 0xcc, 0x42, 0xe1, 0xd4, 0xf1, 0x86, 0xef, 0x94, 0x6f, 0xf8, 0x68, 0x35,
|
||||
0x7b, 0xc9, 0xe5, 0x38, 0x66, 0x21, 0x6d, 0xe5, 0x6c, 0x65, 0x09, 0xf2, 0x31, 0xdc, 0xda, 0x84,
|
||||
0xcf, 0xb9, 0x2f, 0x99, 0x70, 0xac, 0x8e, 0xd1, 0x25, 0x74, 0xb3, 0xeb, 0x37, 0x88, 0x6e, 0x11,
|
||||
0x51, 0x97, 0x70, 0xa0, 0x63, 0x74, 0xb5, 0x82, 0x88, 0xa2, 0x84, 0x12, 0x14, 0x47, 0xc2, 0x2b,
|
||||
0x09, 0x6a, 0xfc, 0xb7, 0xa0, 0x9c, 0xbd, 0x11, 0xb4, 0x09, 0xcf, 0x04, 0x35, 0x53, 0x41, 0x39,
|
||||
0x5c, 0x08, 0xda, 0x10, 0x33, 0x41, 0xad, 0x54, 0x50, 0x0e, 0x67, 0x82, 0xbe, 0x06, 0x48, 0xb8,
|
||||
0xe0, 0x72, 0xba, 0x54, 0x37, 0xde, 0xc6, 0xbe, 0xbe, 0x5f, 0x16, 0xb3, 0xa9, 0x99, 0x1e, 0x55,
|
||||
0xbc, 0x53, 0x2f, 0x94, 0xd4, 0x4a, 0xf2, 0xe5, 0x76, 0xd1, 0xdd, 0x7a, 0xb3, 0xe8, 0x3e, 0x07,
|
||||
0x6b, 0x13, 0xb5, 0xdd, 0x9d, 0x35, 0x30, 0x5e, 0xf4, 0xc7, 0xb6, 0x46, 0x4c, 0xd0, 0x87, 0x23,
|
||||
0x5b, 0x2f, 0x3a, 0xd4, 0x38, 0xaa, 0x41, 0x15, 0x35, 0x1f, 0x35, 0x01, 0x8a, 0x54, 0xbb, 0x4f,
|
||||
0x00, 0x8a, 0x9b, 0x51, 0xd5, 0x16, 0x5d, 0x5e, 0x0a, 0x9e, 0x96, 0xef, 0x2e, 0xcd, 0x2c, 0x85,
|
||||
0xfb, 0x3c, 0x5c, 0xc8, 0x25, 0x56, 0x6d, 0x8b, 0x66, 0x96, 0xfb, 0xb7, 0x06, 0x30, 0xf1, 0x02,
|
||||
0x3e, 0xe6, 0x89, 0xc7, 0xc5, 0xbb, 0xf7, 0xdc, 0x23, 0xa8, 0x09, 0x6c, 0x77, 0xe1, 0xe8, 0x18,
|
||||
0x41, 0xca, 0x11, 0xe9, 0x24, 0xc8, 0x42, 0x72, 0x22, 0xf9, 0x02, 0x2c, 0x9e, 0x35, 0xb9, 0x70,
|
||||
0x0c, 0x8c, 0xda, 0x2b, 0x47, 0xe5, 0x13, 0x20, 0x8b, 0x2b, 0xc8, 0xe4, 0x4b, 0x80, 0x65, 0x7e,
|
||||
0xf1, 0xc2, 0xa9, 0x60, 0xe8, 0xed, 0xb7, 0xa6, 0x25, 0x8b, 0x2d, 0xd1, 0xdd, 0x87, 0x50, 0xc5,
|
||||
0x2f, 0x50, 0x13, 0x13, 0xa7, 0xac, 0x96, 0x4e, 0x4c, 0xb5, 0xde, 0x9e, 0x1d, 0x56, 0x36, 0x3b,
|
||||
0xdc, 0xc7, 0x60, 0x3e, 0x4b, 0xbf, 0xf3, 0x5d, 0x2f, 0xc6, 0xfd, 0x59, 0x83, 0x26, 0xe2, 0x03,
|
||||
0x26, 0x67, 0x4b, 0x9e, 0x90, 0x87, 0x5b, 0x8f, 0xc4, 0xbd, 0x1b, 0xf1, 0x19, 0xaf, 0x57, 0x7a,
|
||||
0x1c, 0x72, 0xa1, 0xfa, 0xdb, 0x84, 0x1a, 0x65, 0xa1, 0x5d, 0xa8, 0xe0, 0xa8, 0x37, 0x41, 0xef,
|
||||
0x3f, 0x4f, 0xeb, 0x68, 0xd8, 0x7f, 0x9e, 0xd6, 0x11, 0x55, 0xe3, 0x5d, 0x01, 0xb4, 0x6f, 0x1b,
|
||||
0xee, 0xaf, 0x9a, 0x2a, 0x3e, 0x36, 0x57, 0xb5, 0x27, 0xc8, 0x7b, 0x50, 0x13, 0x92, 0xc7, 0xd3,
|
||||
0x40, 0xa0, 0x2e, 0x83, 0x9a, 0xca, 0x1c, 0x08, 0x75, 0xf4, 0xe5, 0x2a, 0x9c, 0xe5, 0x47, 0xab,
|
||||
0x35, 0x79, 0x1f, 0xea, 0x42, 0xb2, 0x44, 0x2a, 0x76, 0x3a, 0x48, 0x6b, 0x68, 0x0f, 0x04, 0xb9,
|
||||
0x0d, 0x26, 0x0f, 0xe7, 0x53, 0x4c, 0x8a, 0x72, 0x54, 0x79, 0x38, 0x1f, 0x08, 0x72, 0x17, 0xea,
|
||||
0x8b, 0x24, 0x5a, 0xc5, 0x5e, 0xb8, 0x70, 0xaa, 0x1d, 0xa3, 0x6b, 0xd1, 0x8d, 0x4d, 0xda, 0xa0,
|
||||
0x5f, 0xac, 0x71, 0x98, 0xd5, 0xa9, 0x7e, 0xb1, 0x56, 0xbb, 0x27, 0x2c, 0x5c, 0x70, 0xb5, 0x49,
|
||||
0x2d, 0xdd, 0x1d, 0xed, 0x81, 0x70, 0x7f, 0xd3, 0xa0, 0xfa, 0x74, 0xb9, 0x0a, 0x5f, 0x92, 0x7d,
|
||||
0x68, 0x04, 0x5e, 0x38, 0x55, 0xad, 0x54, 0x68, 0xb6, 0x02, 0x2f, 0x54, 0x35, 0x3c, 0x10, 0xe8,
|
||||
0x67, 0x57, 0x1b, 0x7f, 0xf6, 0xbe, 0x04, 0xec, 0x2a, 0xf3, 0xf7, 0xb2, 0x24, 0x18, 0x98, 0x84,
|
||||
0xbb, 0xe5, 0x24, 0xe0, 0x01, 0xbd, 0x7e, 0x38, 0x8b, 0xe6, 0x5e, 0xb8, 0x28, 0x32, 0xa0, 0xde,
|
||||
0x6d, 0xfc, 0xaa, 0x26, 0xc5, 0xb5, 0x7b, 0x00, 0xf5, 0x9c, 0x75, 0xa3, 0x79, 0xbf, 0x1f, 0xa9,
|
||||
0x67, 0x75, 0xeb, 0x2d, 0xd5, 0xdd, 0x1f, 0xa1, 0x85, 0x9b, 0xf3, 0xf9, 0xff, 0xed, 0xb2, 0x03,
|
||||
0x30, 0x67, 0x6a, 0x87, 0xbc, 0xc9, 0x76, 0x6f, 0x08, 0xcf, 0x03, 0x52, 0xda, 0xd1, 0xde, 0xeb,
|
||||
0xeb, 0x7d, 0xed, 0x8f, 0xeb, 0x7d, 0xed, 0xaf, 0xeb, 0x7d, 0xed, 0x07, 0x53, 0xb1, 0xe3, 0x8b,
|
||||
0x0b, 0x13, 0xff, 0x60, 0x3e, 0xfb, 0x27, 0x00, 0x00, 0xff, 0xff, 0x36, 0xd7, 0x1e, 0xb4, 0xf2,
|
||||
0x08, 0x00, 0x00,
|
||||
// 1081 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x56, 0xdb, 0x6e, 0xdb, 0x46,
|
||||
0x13, 0x36, 0x49, 0x89, 0x12, 0x47, 0x87, 0xd0, 0x0b, 0x27, 0x3f, 0xff, 0xa0, 0x71, 0x54, 0x02,
|
||||
0x69, 0x85, 0xa2, 0x90, 0x91, 0xb4, 0x17, 0x0d, 0x1a, 0x14, 0xb0, 0x5d, 0xf9, 0x80, 0x46, 0x12,
|
||||
0xb2, 0x92, 0xd1, 0xa6, 0x37, 0xc2, 0x5a, 0x5a, 0x4b, 0x44, 0x78, 0x2a, 0x77, 0x15, 0x58, 0x7d,
|
||||
0x8f, 0xde, 0xf5, 0x25, 0x7a, 0xdf, 0x07, 0x08, 0xd0, 0x9b, 0x3e, 0x41, 0x51, 0xf8, 0xaa, 0x8f,
|
||||
0x51, 0xec, 0x90, 0x14, 0xa9, 0x38, 0x05, 0x9a, 0xde, 0xed, 0x7c, 0xf3, 0xcd, 0xec, 0xc7, 0xdd,
|
||||
0x99, 0x59, 0x42, 0x43, 0xae, 0x63, 0x2e, 0x7a, 0x71, 0x12, 0xc9, 0x88, 0x40, 0x9c, 0x44, 0x01,
|
||||
0x97, 0x4b, 0xbe, 0x12, 0xf7, 0xf7, 0x16, 0xd1, 0x22, 0x42, 0xf8, 0x40, 0xad, 0x52, 0x86, 0xfb,
|
||||
0xb3, 0x0e, 0xed, 0x01, 0x97, 0x89, 0x37, 0x1b, 0x70, 0xc9, 0xe6, 0x4c, 0x32, 0xf2, 0x14, 0x2a,
|
||||
0x2a, 0x87, 0xa3, 0x75, 0xb4, 0x6e, 0xfb, 0xc9, 0xa3, 0x5e, 0x91, 0xa3, 0xb7, 0xcd, 0xcc, 0xcc,
|
||||
0xc9, 0x3a, 0xe6, 0x14, 0x43, 0xc8, 0xa7, 0x40, 0x02, 0xc4, 0xa6, 0x57, 0x2c, 0xf0, 0xfc, 0xf5,
|
||||
0x34, 0x64, 0x01, 0x77, 0xf4, 0x8e, 0xd6, 0xb5, 0xa8, 0x9d, 0x7a, 0x4e, 0xd0, 0x31, 0x64, 0x01,
|
||||
0x27, 0x04, 0x2a, 0x4b, 0xee, 0xc7, 0x4e, 0x05, 0xfd, 0xb8, 0x56, 0xd8, 0x2a, 0xf4, 0xa4, 0x53,
|
||||
0x4d, 0x31, 0xb5, 0x76, 0xd7, 0x00, 0xc5, 0x4e, 0xa4, 0x01, 0xb5, 0x8b, 0xe1, 0x37, 0xc3, 0xd1,
|
||||
0xb7, 0x43, 0x7b, 0x47, 0x19, 0xc7, 0xa3, 0x8b, 0xe1, 0xa4, 0x4f, 0x6d, 0x8d, 0x58, 0x50, 0x3d,
|
||||
0x3d, 0xbc, 0x38, 0xed, 0xdb, 0x3a, 0x69, 0x81, 0x75, 0x76, 0x3e, 0x9e, 0x8c, 0x4e, 0xe9, 0xe1,
|
||||
0xc0, 0x36, 0x08, 0x81, 0x36, 0x7a, 0x0a, 0xac, 0xa2, 0x42, 0xc7, 0x17, 0x83, 0xc1, 0x21, 0x7d,
|
||||
0x69, 0x57, 0x49, 0x1d, 0x2a, 0xe7, 0xc3, 0x93, 0x91, 0x6d, 0x92, 0x26, 0xd4, 0xc7, 0x93, 0xc3,
|
||||
0x49, 0x7f, 0xdc, 0x9f, 0xd8, 0x35, 0xf7, 0x19, 0x98, 0x63, 0x16, 0xc4, 0x3e, 0x27, 0x7b, 0x50,
|
||||
0x7d, 0xcd, 0xfc, 0x55, 0x7a, 0x2c, 0x1a, 0x4d, 0x0d, 0xf2, 0x01, 0x58, 0xd2, 0x0b, 0xb8, 0x90,
|
||||
0x2c, 0x88, 0xf1, 0x3b, 0x0d, 0x5a, 0x00, 0x6e, 0x04, 0xf5, 0xfe, 0x35, 0x0f, 0x62, 0x9f, 0x25,
|
||||
0xe4, 0x00, 0x4c, 0x9f, 0x5d, 0x72, 0x5f, 0x38, 0x5a, 0xc7, 0xe8, 0x36, 0x9e, 0xec, 0x96, 0xcf,
|
||||
0xf5, 0xb9, 0xf2, 0x1c, 0x55, 0xde, 0xfc, 0xf1, 0x70, 0x87, 0x66, 0xb4, 0x62, 0x43, 0xfd, 0x1f,
|
||||
0x37, 0x34, 0xde, 0xde, 0xf0, 0xb7, 0x2a, 0x58, 0x67, 0x9e, 0x90, 0xd1, 0x22, 0x61, 0x01, 0x79,
|
||||
0x00, 0xd6, 0x2c, 0x5a, 0x85, 0x72, 0xea, 0x85, 0x12, 0x65, 0x57, 0xce, 0x76, 0x68, 0x1d, 0xa1,
|
||||
0xf3, 0x50, 0x92, 0x0f, 0xa1, 0x91, 0xba, 0xaf, 0xfc, 0x88, 0xc9, 0x74, 0x9b, 0xb3, 0x1d, 0x0a,
|
||||
0x08, 0x9e, 0x28, 0x8c, 0xd8, 0x60, 0x88, 0x55, 0x80, 0xfb, 0x68, 0x54, 0x2d, 0xc9, 0x3d, 0x30,
|
||||
0xc5, 0x6c, 0xc9, 0x03, 0x86, 0xb7, 0xb6, 0x4b, 0x33, 0x8b, 0x3c, 0x82, 0xf6, 0x8f, 0x3c, 0x89,
|
||||
0xa6, 0x72, 0x99, 0x70, 0xb1, 0x8c, 0xfc, 0x39, 0xde, 0xa0, 0x46, 0x5b, 0x0a, 0x9d, 0xe4, 0x20,
|
||||
0xf9, 0x28, 0xa3, 0x15, 0xba, 0x4c, 0xd4, 0xa5, 0xd1, 0xa6, 0xc2, 0x8f, 0x73, 0x6d, 0x9f, 0x80,
|
||||
0x5d, 0xe2, 0xa5, 0x02, 0x6b, 0x28, 0x50, 0xa3, 0xed, 0x0d, 0x33, 0x15, 0x79, 0x0c, 0xed, 0x90,
|
||||
0x2f, 0x98, 0xf4, 0x5e, 0xf3, 0xa9, 0x88, 0x59, 0x28, 0x9c, 0x3a, 0x9e, 0xf0, 0xbd, 0xf2, 0x09,
|
||||
0x1f, 0xad, 0x66, 0xaf, 0xb8, 0x1c, 0xc7, 0x2c, 0xcc, 0x8e, 0xb9, 0x95, 0xc7, 0x28, 0x4c, 0x90,
|
||||
0x8f, 0xe1, 0xce, 0x26, 0xc9, 0x9c, 0xfb, 0x92, 0x09, 0xc7, 0xea, 0x18, 0x5d, 0x42, 0x37, 0xb9,
|
||||
0xbf, 0x46, 0x74, 0x8b, 0x88, 0xea, 0x84, 0x03, 0x1d, 0xa3, 0xab, 0x15, 0x44, 0x94, 0x26, 0x94,
|
||||
0xac, 0x38, 0x12, 0x5e, 0x49, 0x56, 0xe3, 0xdf, 0xc8, 0xca, 0x63, 0x36, 0xb2, 0x36, 0x49, 0x32,
|
||||
0x59, 0xcd, 0x54, 0x56, 0x0e, 0x17, 0xb2, 0x36, 0xc4, 0x4c, 0x56, 0x2b, 0x95, 0x95, 0xc3, 0x99,
|
||||
0xac, 0xaf, 0x00, 0x12, 0x2e, 0xb8, 0x9c, 0x2e, 0xd5, 0xe9, 0xb7, 0xb1, 0xc7, 0x1f, 0x96, 0x25,
|
||||
0x6d, 0xea, 0xa7, 0x47, 0x15, 0xef, 0xcc, 0x0b, 0x25, 0xb5, 0x92, 0x7c, 0xb9, 0x5d, 0x80, 0x77,
|
||||
0xde, 0x2e, 0xc0, 0xcf, 0xc1, 0xda, 0x44, 0x6d, 0x77, 0x6a, 0x0d, 0x8c, 0x97, 0xfd, 0xb1, 0xad,
|
||||
0x11, 0x13, 0xf4, 0xe1, 0xc8, 0xd6, 0x8b, 0x6e, 0x35, 0x8e, 0x6a, 0x50, 0x45, 0xcd, 0x47, 0x4d,
|
||||
0x80, 0xe2, 0xda, 0xdd, 0x67, 0x00, 0xc5, 0xf9, 0xa8, 0xca, 0x8b, 0xae, 0xae, 0x04, 0x4f, 0x4b,
|
||||
0x79, 0x97, 0x66, 0x96, 0xc2, 0x7d, 0x1e, 0x2e, 0xe4, 0x12, 0x2b, 0xb8, 0x45, 0x33, 0xcb, 0xfd,
|
||||
0x4b, 0x03, 0x98, 0x78, 0x01, 0x1f, 0xf3, 0xc4, 0xe3, 0xe2, 0xfd, 0xfb, 0xef, 0x09, 0xd4, 0x04,
|
||||
0xb6, 0xbe, 0x70, 0x74, 0x8c, 0x20, 0xe5, 0x88, 0x74, 0x2a, 0x64, 0x21, 0x39, 0x91, 0x7c, 0x01,
|
||||
0x16, 0xcf, 0x1a, 0x5e, 0x38, 0x06, 0x46, 0xed, 0x95, 0xa3, 0xf2, 0x69, 0x90, 0xc5, 0x15, 0x64,
|
||||
0xf2, 0x25, 0xc0, 0x32, 0x3f, 0x78, 0xe1, 0x54, 0x30, 0xf4, 0xee, 0x3b, 0xaf, 0x25, 0x8b, 0x2d,
|
||||
0xd1, 0xdd, 0xc7, 0x50, 0xc5, 0x2f, 0x50, 0xd3, 0x13, 0x27, 0xae, 0x96, 0x4e, 0x4f, 0xb5, 0xde,
|
||||
0x9e, 0x23, 0x56, 0x36, 0x47, 0xdc, 0xa7, 0x60, 0x3e, 0x4f, 0xbf, 0xf3, 0x7d, 0x0f, 0xc6, 0xfd,
|
||||
0x49, 0x83, 0x26, 0xe2, 0x03, 0x26, 0x67, 0x4b, 0x9e, 0x90, 0xc7, 0x5b, 0x0f, 0xc6, 0x83, 0x5b,
|
||||
0xf1, 0x19, 0xaf, 0x57, 0x7a, 0x28, 0x72, 0xa1, 0xfa, 0xbb, 0x84, 0x1a, 0x65, 0xa1, 0x5d, 0xa8,
|
||||
0xe0, 0xd8, 0x37, 0x41, 0xef, 0xbf, 0x48, 0xeb, 0x68, 0xd8, 0x7f, 0x91, 0xd6, 0x11, 0x55, 0xa3,
|
||||
0x5e, 0x01, 0xb4, 0x6f, 0x1b, 0xee, 0x2f, 0x9a, 0x2a, 0x3e, 0x36, 0x57, 0xb5, 0x27, 0xc8, 0xff,
|
||||
0xa0, 0x26, 0x24, 0x8f, 0xa7, 0x81, 0x40, 0x5d, 0x06, 0x35, 0x95, 0x39, 0x10, 0x6a, 0xeb, 0xab,
|
||||
0x55, 0x38, 0xcb, 0xb7, 0x56, 0x6b, 0xf2, 0x7f, 0xa8, 0x0b, 0xc9, 0x12, 0xa9, 0xd8, 0xe9, 0x50,
|
||||
0xad, 0xa1, 0x3d, 0x10, 0xe4, 0x2e, 0x98, 0x3c, 0x9c, 0x4f, 0xf1, 0x52, 0x94, 0xa3, 0xca, 0xc3,
|
||||
0xf9, 0x40, 0x90, 0xfb, 0x50, 0x5f, 0x24, 0xd1, 0x2a, 0xf6, 0xc2, 0x85, 0x53, 0xed, 0x18, 0x5d,
|
||||
0x8b, 0x6e, 0x6c, 0xd2, 0x06, 0xfd, 0x72, 0x8d, 0x83, 0xad, 0x4e, 0xf5, 0xcb, 0xb5, 0xca, 0x9e,
|
||||
0xb0, 0x70, 0xc1, 0x55, 0x92, 0x5a, 0x9a, 0x1d, 0xed, 0x81, 0x70, 0x7f, 0xd5, 0xa0, 0x7a, 0xbc,
|
||||
0x5c, 0x85, 0xaf, 0xc8, 0x3e, 0x34, 0x02, 0x2f, 0x9c, 0xaa, 0x56, 0x2a, 0x34, 0x5b, 0x81, 0x17,
|
||||
0xaa, 0x1a, 0x1e, 0x08, 0xf4, 0xb3, 0xeb, 0x8d, 0x3f, 0x7b, 0x6b, 0x02, 0x76, 0x9d, 0xf9, 0x7b,
|
||||
0xd9, 0x25, 0x18, 0x78, 0x09, 0xf7, 0xcb, 0x97, 0x80, 0x1b, 0xf4, 0xfa, 0xe1, 0x2c, 0x9a, 0x7b,
|
||||
0xe1, 0xa2, 0xb8, 0x01, 0xf5, 0x86, 0xe3, 0x57, 0x35, 0x29, 0xae, 0xdd, 0x03, 0xa8, 0xe7, 0xac,
|
||||
0x5b, 0xcd, 0xfb, 0xdd, 0x48, 0x3d, 0xb1, 0x5b, 0xef, 0xaa, 0xee, 0xfe, 0x00, 0x2d, 0x4c, 0xce,
|
||||
0xe7, 0xff, 0xb5, 0xcb, 0x0e, 0xc0, 0x9c, 0xa9, 0x0c, 0x79, 0x93, 0xed, 0xde, 0x12, 0x9e, 0x07,
|
||||
0xa4, 0xb4, 0xa3, 0xbd, 0x37, 0x37, 0xfb, 0xda, 0xef, 0x37, 0xfb, 0xda, 0x9f, 0x37, 0xfb, 0xda,
|
||||
0xf7, 0xa6, 0x62, 0xc7, 0x97, 0x97, 0x26, 0xfe, 0xcd, 0x7c, 0xf6, 0x77, 0x00, 0x00, 0x00, 0xff,
|
||||
0xff, 0x53, 0x09, 0xe5, 0x37, 0xfe, 0x08, 0x00, 0x00,
|
||||
}
|
||||
|
||||
func (m *MetricMetadata) Marshal() (dAtA []byte, err error) {
|
||||
|
@ -2903,7 +2903,7 @@ func (m *Histogram) Unmarshal(dAtA []byte) error {
|
|||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.NegativeSpans = append(m.NegativeSpans, &BucketSpan{})
|
||||
m.NegativeSpans = append(m.NegativeSpans, BucketSpan{})
|
||||
if err := m.NegativeSpans[len(m.NegativeSpans)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -3069,7 +3069,7 @@ func (m *Histogram) Unmarshal(dAtA []byte) error {
|
|||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.PositiveSpans = append(m.PositiveSpans, &BucketSpan{})
|
||||
m.PositiveSpans = append(m.PositiveSpans, BucketSpan{})
|
||||
if err := m.PositiveSpans[len(m.PositiveSpans)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
|
@ -86,9 +86,9 @@ message Histogram {
|
|||
uint64 zero_count_int = 6;
|
||||
double zero_count_float = 7;
|
||||
}
|
||||
|
||||
|
||||
// Negative Buckets.
|
||||
repeated BucketSpan negative_spans = 8;
|
||||
repeated BucketSpan negative_spans = 8 [(gogoproto.nullable) = false];
|
||||
// Use either "negative_deltas" or "negative_counts", the former for
|
||||
// regular histograms with integer counts, the latter for float
|
||||
// histograms.
|
||||
|
@ -96,7 +96,7 @@ message Histogram {
|
|||
repeated double negative_counts = 10; // Absolute count of each bucket.
|
||||
|
||||
// Positive Buckets.
|
||||
repeated BucketSpan positive_spans = 11;
|
||||
repeated BucketSpan positive_spans = 11 [(gogoproto.nullable) = false];
|
||||
// Use either "positive_deltas" or "positive_counts", the former for
|
||||
// regular histograms with integer counts, the latter for float
|
||||
// histograms.
|
||||
|
@ -107,7 +107,7 @@ message Histogram {
|
|||
// timestamp is in ms format, see model/timestamp/timestamp.go for
|
||||
// conversion from time.Time to Prometheus timestamp.
|
||||
int64 timestamp = 15;
|
||||
}
|
||||
}
|
||||
|
||||
// A BucketSpan defines a number of consecutive buckets with their
|
||||
// offset. Logically, it would be more straightforward to include the
|
||||
|
|
|
@ -25,7 +25,7 @@ import (
|
|||
|
||||
"github.com/go-kit/log"
|
||||
|
||||
"github.com/prometheus/prometheus/util/stats"
|
||||
"github.com/prometheus/prometheus/tsdb/tsdbutil"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
"go.uber.org/goleak"
|
||||
|
@ -35,7 +35,7 @@ import (
|
|||
"github.com/prometheus/prometheus/model/timestamp"
|
||||
"github.com/prometheus/prometheus/promql/parser"
|
||||
"github.com/prometheus/prometheus/storage"
|
||||
"github.com/prometheus/prometheus/tsdb"
|
||||
"github.com/prometheus/prometheus/util/stats"
|
||||
)
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
|
@ -3125,7 +3125,7 @@ func TestRangeQuery(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestSparseHistogramRate(t *testing.T) {
|
||||
func TestNativeHistogramRate(t *testing.T) {
|
||||
// TODO(beorn7): Integrate histograms into the PromQL testing framework
|
||||
// and write more tests there.
|
||||
test, err := NewTest(t, "")
|
||||
|
@ -3136,7 +3136,7 @@ func TestSparseHistogramRate(t *testing.T) {
|
|||
lbls := labels.FromStrings("__name__", seriesName)
|
||||
|
||||
app := test.Storage().Appender(context.TODO())
|
||||
for i, h := range tsdb.GenerateTestHistograms(100) {
|
||||
for i, h := range tsdbutil.GenerateTestHistograms(100) {
|
||||
_, err := app.AppendHistogram(0, lbls, int64(i)*int64(15*time.Second/time.Millisecond), h, nil)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
@ -3155,20 +3155,22 @@ func TestSparseHistogramRate(t *testing.T) {
|
|||
require.Len(t, vector, 1)
|
||||
actualHistogram := vector[0].H
|
||||
expectedHistogram := &histogram.FloatHistogram{
|
||||
Schema: 1,
|
||||
ZeroThreshold: 0.001,
|
||||
ZeroCount: 1. / 15.,
|
||||
Count: 8. / 15.,
|
||||
Sum: 1.226666666666667,
|
||||
PositiveSpans: []histogram.Span{{Offset: 0, Length: 2}, {Offset: 1, Length: 2}},
|
||||
PositiveBuckets: []float64{1. / 15., 1. / 15., 1. / 15., 1. / 15.},
|
||||
NegativeSpans: []histogram.Span{{Offset: 0, Length: 2}, {Offset: 1, Length: 2}},
|
||||
NegativeBuckets: []float64{1. / 15., 1. / 15., 1. / 15., 1. / 15.},
|
||||
// TODO(beorn7): This should be GaugeType. Change it once supported by PromQL.
|
||||
CounterResetHint: histogram.NotCounterReset,
|
||||
Schema: 1,
|
||||
ZeroThreshold: 0.001,
|
||||
ZeroCount: 1. / 15.,
|
||||
Count: 8. / 15.,
|
||||
Sum: 1.226666666666667,
|
||||
PositiveSpans: []histogram.Span{{Offset: 0, Length: 2}, {Offset: 1, Length: 2}},
|
||||
PositiveBuckets: []float64{1. / 15., 1. / 15., 1. / 15., 1. / 15.},
|
||||
NegativeSpans: []histogram.Span{{Offset: 0, Length: 2}, {Offset: 1, Length: 2}},
|
||||
NegativeBuckets: []float64{1. / 15., 1. / 15., 1. / 15., 1. / 15.},
|
||||
}
|
||||
require.Equal(t, expectedHistogram, actualHistogram)
|
||||
}
|
||||
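A rough sanity check of the expected values above, under the assumption (implied by the expectations, not stated in the test) that each successive generated histogram raises Count by 8, ZeroCount by 1 and every individual bucket by 1 per 15 s sample:

// rate() divides the increase per interval by the interval length in seconds,
// so +8 per 15 s sample gives Count: 8/15, and +1 per sample gives 1/15 for
// ZeroCount and for each positive and negative bucket.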
|
||||
func TestSparseFloatHistogramRate(t *testing.T) {
|
||||
func TestNativeFloatHistogramRate(t *testing.T) {
|
||||
// TODO(beorn7): Integrate histograms into the PromQL testing framework
|
||||
// and write more tests there.
|
||||
test, err := NewTest(t, "")
|
||||
|
@ -3179,7 +3181,7 @@ func TestSparseFloatHistogramRate(t *testing.T) {
|
|||
lbls := labels.FromStrings("__name__", seriesName)
|
||||
|
||||
app := test.Storage().Appender(context.TODO())
|
||||
for i, fh := range tsdb.GenerateTestFloatHistograms(100) {
|
||||
for i, fh := range tsdbutil.GenerateTestFloatHistograms(100) {
|
||||
_, err := app.AppendHistogram(0, lbls, int64(i)*int64(15*time.Second/time.Millisecond), nil, fh)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
@ -3198,20 +3200,22 @@ func TestSparseFloatHistogramRate(t *testing.T) {
|
|||
require.Len(t, vector, 1)
|
||||
actualHistogram := vector[0].H
|
||||
expectedHistogram := &histogram.FloatHistogram{
|
||||
Schema: 1,
|
||||
ZeroThreshold: 0.001,
|
||||
ZeroCount: 1. / 15.,
|
||||
Count: 8. / 15.,
|
||||
Sum: 1.226666666666667,
|
||||
PositiveSpans: []histogram.Span{{Offset: 0, Length: 2}, {Offset: 1, Length: 2}},
|
||||
PositiveBuckets: []float64{1. / 15., 1. / 15., 1. / 15., 1. / 15.},
|
||||
NegativeSpans: []histogram.Span{{Offset: 0, Length: 2}, {Offset: 1, Length: 2}},
|
||||
NegativeBuckets: []float64{1. / 15., 1. / 15., 1. / 15., 1. / 15.},
|
||||
// TODO(beorn7): This should be GaugeType. Change it once supported by PromQL.
|
||||
CounterResetHint: histogram.NotCounterReset,
|
||||
Schema: 1,
|
||||
ZeroThreshold: 0.001,
|
||||
ZeroCount: 1. / 15.,
|
||||
Count: 8. / 15.,
|
||||
Sum: 1.226666666666667,
|
||||
PositiveSpans: []histogram.Span{{Offset: 0, Length: 2}, {Offset: 1, Length: 2}},
|
||||
PositiveBuckets: []float64{1. / 15., 1. / 15., 1. / 15., 1. / 15.},
|
||||
NegativeSpans: []histogram.Span{{Offset: 0, Length: 2}, {Offset: 1, Length: 2}},
|
||||
NegativeBuckets: []float64{1. / 15., 1. / 15., 1. / 15., 1. / 15.},
|
||||
}
|
||||
require.Equal(t, expectedHistogram, actualHistogram)
|
||||
}
|
||||
|
||||
func TestSparseHistogram_HistogramCountAndSum(t *testing.T) {
|
||||
func TestNativeHistogram_HistogramCountAndSum(t *testing.T) {
|
||||
// TODO(codesome): Integrate histograms into the PromQL testing framework
|
||||
// and write more tests there.
|
||||
h := &histogram.Histogram{
|
||||
|
@ -3290,7 +3294,7 @@ func TestSparseHistogram_HistogramCountAndSum(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestSparseHistogram_HistogramQuantile(t *testing.T) {
|
||||
func TestNativeHistogram_HistogramQuantile(t *testing.T) {
|
||||
// TODO(codesome): Integrate histograms into the PromQL testing framework
|
||||
// and write more tests there.
|
||||
type subCase struct {
|
||||
|
@ -3526,7 +3530,7 @@ func TestSparseHistogram_HistogramQuantile(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestSparseHistogram_HistogramFraction(t *testing.T) {
|
||||
func TestNativeHistogram_HistogramFraction(t *testing.T) {
|
||||
// TODO(codesome): Integrate histograms into the PromQL testing framework
|
||||
// and write more tests there.
|
||||
type subCase struct {
|
||||
|
@ -3961,7 +3965,7 @@ func TestSparseHistogram_HistogramFraction(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestSparseHistogram_Sum_Count_AddOperator(t *testing.T) {
|
||||
func TestNativeHistogram_Sum_Count_AddOperator(t *testing.T) {
|
||||
// TODO(codesome): Integrate histograms into the PromQL testing framework
|
||||
// and write more tests there.
|
||||
cases := []struct {
|
||||
|
|
|
@ -83,11 +83,12 @@ type Alert struct {
|
|||
Value float64
|
||||
// The interval during which the condition of this alert held true.
|
||||
// ResolvedAt will be 0 to indicate a still active alert.
|
||||
ActiveAt time.Time
|
||||
FiredAt time.Time
|
||||
ResolvedAt time.Time
|
||||
LastSentAt time.Time
|
||||
ValidUntil time.Time
|
||||
ActiveAt time.Time
|
||||
FiredAt time.Time
|
||||
ResolvedAt time.Time
|
||||
LastSentAt time.Time
|
||||
ValidUntil time.Time
|
||||
KeepFiringSince time.Time
|
||||
}
|
||||
|
||||
func (a *Alert) needsSending(ts time.Time, resendDelay time.Duration) bool {
|
||||
|
@ -112,6 +113,9 @@ type AlertingRule struct {
|
|||
// The duration for which a labelset needs to persist in the expression
|
||||
// output vector before an alert transitions from Pending to Firing state.
|
||||
holdDuration time.Duration
|
||||
// The amount of time that the alert should remain firing after the
|
||||
// resolution.
|
||||
keepFiringFor time.Duration
|
||||
// Extra labels to attach to the resulting alert sample vectors.
|
||||
labels labels.Labels
|
||||
// Non-identifying key/value pairs.
|
||||
|
@ -142,7 +146,7 @@ type AlertingRule struct {
|
|||
|
||||
// NewAlertingRule constructs a new AlertingRule.
|
||||
func NewAlertingRule(
|
||||
name string, vec parser.Expr, hold time.Duration,
|
||||
name string, vec parser.Expr, hold, keepFiringFor time.Duration,
|
||||
labels, annotations, externalLabels labels.Labels, externalURL string,
|
||||
restored bool, logger log.Logger,
|
||||
) *AlertingRule {
|
||||
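A hypothetical call with the extended signature, to make the new parameter order explicit (the alert name, expression variable and durations are illustrative only):

rule := NewAlertingRule(
	"HighRequestRate",    // alert name
	expr,                 // parsed PromQL expression (parser.Expr)
	10*time.Minute,       // hold: the `for` duration before Pending becomes Firing
	5*time.Minute,        // keepFiringFor: how long to stay Firing after resolution
	labels.EmptyLabels(), // labels
	labels.EmptyLabels(), // annotations
	labels.EmptyLabels(), // external labels
	"",                   // external URL
	true,                 // restored
	log.NewNopLogger(),
)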
|
@ -152,6 +156,7 @@ func NewAlertingRule(
|
|||
name: name,
|
||||
vector: vec,
|
||||
holdDuration: hold,
|
||||
keepFiringFor: keepFiringFor,
|
||||
labels: labels,
|
||||
annotations: annotations,
|
||||
externalLabels: el,
|
||||
|
@ -201,6 +206,12 @@ func (r *AlertingRule) HoldDuration() time.Duration {
|
|||
return r.holdDuration
|
||||
}
|
||||
|
||||
// KeepFiringFor returns the duration an alerting rule should keep firing for
|
||||
// after resolution.
|
||||
func (r *AlertingRule) KeepFiringFor() time.Duration {
|
||||
return r.keepFiringFor
|
||||
}
|
||||
|
||||
// Labels returns the labels of the alerting rule.
|
||||
func (r *AlertingRule) Labels() labels.Labels {
|
||||
return r.labels
|
||||
|
@ -311,6 +322,8 @@ const resolvedRetention = 15 * time.Minute
|
|||
// Eval evaluates the rule expression and then creates pending alerts and fires
|
||||
// or removes previously pending alerts accordingly.
|
||||
func (r *AlertingRule) Eval(ctx context.Context, ts time.Time, query QueryFunc, externalURL *url.URL, limit int) (promql.Vector, error) {
|
||||
ctx = NewOriginContext(ctx, NewRuleDetail(r))
|
||||
|
||||
res, err := query(ctx, r.vector.String(), ts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -404,16 +417,36 @@ func (r *AlertingRule) Eval(ctx context.Context, ts time.Time, query QueryFunc,
|
|||
// Check if any pending alerts should be removed or fire now. Write out alert timeseries.
|
||||
for fp, a := range r.active {
|
||||
if _, ok := resultFPs[fp]; !ok {
|
||||
// There are no firing alerts for this fingerprint. The alert is no
|
||||
// longer firing.
|
||||
|
||||
// Use keepFiringFor value to determine if the alert should keep
|
||||
// firing.
|
||||
var keepFiring bool
|
||||
if a.State == StateFiring && r.keepFiringFor > 0 {
|
||||
if a.KeepFiringSince.IsZero() {
|
||||
a.KeepFiringSince = ts
|
||||
}
|
||||
if ts.Sub(a.KeepFiringSince) < r.keepFiringFor {
|
||||
keepFiring = true
|
||||
}
|
||||
}
|
||||
|
||||
// If the alert was previously firing, keep it around for a given
|
||||
// retention time so it is reported as resolved to the AlertManager.
|
||||
if a.State == StatePending || (!a.ResolvedAt.IsZero() && ts.Sub(a.ResolvedAt) > resolvedRetention) {
|
||||
delete(r.active, fp)
|
||||
}
|
||||
if a.State != StateInactive {
|
||||
if a.State != StateInactive && !keepFiring {
|
||||
a.State = StateInactive
|
||||
a.ResolvedAt = ts
|
||||
}
|
||||
continue
|
||||
if !keepFiring {
|
||||
continue
|
||||
}
|
||||
} else {
|
||||
// The alert is firing, reset keepFiringSince.
|
||||
a.KeepFiringSince = time.Time{}
|
||||
}
|
||||
numActivePending++
|
||||
|
||||
|
@ -512,11 +545,12 @@ func (r *AlertingRule) sendAlerts(ctx context.Context, ts time.Time, resendDelay
|
|||
|
||||
func (r *AlertingRule) String() string {
|
||||
ar := rulefmt.Rule{
|
||||
Alert: r.name,
|
||||
Expr: r.vector.String(),
|
||||
For: model.Duration(r.holdDuration),
|
||||
Labels: r.labels.Map(),
|
||||
Annotations: r.annotations.Map(),
|
||||
Alert: r.name,
|
||||
Expr: r.vector.String(),
|
||||
For: model.Duration(r.holdDuration),
|
||||
KeepFiringFor: model.Duration(r.keepFiringFor),
|
||||
Labels: r.labels.Map(),
|
||||
Annotations: r.annotations.Map(),
|
||||
}
|
||||
|
||||
byt, err := yaml.Marshal(ar)
|
||||
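With the field wired through, a rule that sets it is rendered back including the keep-firing duration; an illustrative sketch of the resulting YAML (values made up):

// alert: HighRequestRate
// expr: http_requests > 50
// for: 1m
// keep_firing_for: 1m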
|
|
|
@ -66,7 +66,7 @@ func TestAlertingRuleState(t *testing.T) {
|
|||
}
|
||||
|
||||
for i, test := range tests {
|
||||
rule := NewAlertingRule(test.name, nil, 0, labels.EmptyLabels(), labels.EmptyLabels(), labels.EmptyLabels(), "", true, nil)
|
||||
rule := NewAlertingRule(test.name, nil, 0, 0, labels.EmptyLabels(), labels.EmptyLabels(), labels.EmptyLabels(), "", true, nil)
|
||||
rule.active = test.active
|
||||
got := rule.State()
|
||||
require.Equal(t, test.want, got, "test case %d unexpected AlertState, want:%d got:%d", i, test.want, got)
|
||||
|
@ -76,7 +76,7 @@ func TestAlertingRuleState(t *testing.T) {
|
|||
func TestAlertingRuleLabelsUpdate(t *testing.T) {
|
||||
suite, err := promql.NewTest(t, `
|
||||
load 1m
|
||||
http_requests{job="app-server", instance="0"} 75 85 70 70
|
||||
http_requests{job="app-server", instance="0"} 75 85 70 70 stale
|
||||
`)
|
||||
require.NoError(t, err)
|
||||
defer suite.Close()
|
||||
|
@ -90,6 +90,7 @@ func TestAlertingRuleLabelsUpdate(t *testing.T) {
|
|||
"HTTPRequestRateLow",
|
||||
expr,
|
||||
time.Minute,
|
||||
0,
|
||||
// Basing alerting rule labels off of a value that can change is a very bad idea.
|
||||
// If an alert is going back and forth between two label values it will never fire.
|
||||
// Instead, you should write two alerts with constant labels.
|
||||
|
@ -173,6 +174,10 @@ func TestAlertingRuleLabelsUpdate(t *testing.T) {
|
|||
|
||||
require.Equal(t, result, filteredRes)
|
||||
}
|
||||
evalTime := baseTime.Add(time.Duration(len(results)) * time.Minute)
|
||||
res, err := rule.Eval(suite.Context(), evalTime, EngineQueryFunc(suite.QueryEngine(), suite.Storage()), nil, 0)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, len(res))
|
||||
}
|
||||
|
||||
func TestAlertingRuleExternalLabelsInTemplate(t *testing.T) {
|
||||
|
@ -192,6 +197,7 @@ func TestAlertingRuleExternalLabelsInTemplate(t *testing.T) {
|
|||
"ExternalLabelDoesNotExist",
|
||||
expr,
|
||||
time.Minute,
|
||||
0,
|
||||
labels.FromStrings("templated_label", "There are {{ len $externalLabels }} external Labels, of which foo is {{ $externalLabels.foo }}."),
|
||||
labels.EmptyLabels(),
|
||||
labels.EmptyLabels(),
|
||||
|
@ -202,6 +208,7 @@ func TestAlertingRuleExternalLabelsInTemplate(t *testing.T) {
|
|||
"ExternalLabelExists",
|
||||
expr,
|
||||
time.Minute,
|
||||
0,
|
||||
labels.FromStrings("templated_label", "There are {{ len $externalLabels }} external Labels, of which foo is {{ $externalLabels.foo }}."),
|
||||
labels.EmptyLabels(),
|
||||
labels.FromStrings("foo", "bar", "dings", "bums"),
|
||||
|
@ -286,6 +293,7 @@ func TestAlertingRuleExternalURLInTemplate(t *testing.T) {
|
|||
"ExternalURLDoesNotExist",
|
||||
expr,
|
||||
time.Minute,
|
||||
0,
|
||||
labels.FromStrings("templated_label", "The external URL is {{ $externalURL }}."),
|
||||
labels.EmptyLabels(),
|
||||
labels.EmptyLabels(),
|
||||
|
@ -296,6 +304,7 @@ func TestAlertingRuleExternalURLInTemplate(t *testing.T) {
|
|||
"ExternalURLExists",
|
||||
expr,
|
||||
time.Minute,
|
||||
0,
|
||||
labels.FromStrings("templated_label", "The external URL is {{ $externalURL }}."),
|
||||
labels.EmptyLabels(),
|
||||
labels.EmptyLabels(),
|
||||
|
@ -380,6 +389,7 @@ func TestAlertingRuleEmptyLabelFromTemplate(t *testing.T) {
|
|||
"EmptyLabel",
|
||||
expr,
|
||||
time.Minute,
|
||||
0,
|
||||
labels.FromStrings("empty_label", ""),
|
||||
labels.EmptyLabels(),
|
||||
labels.EmptyLabels(),
|
||||
|
@ -436,6 +446,7 @@ func TestAlertingRuleQueryInTemplate(t *testing.T) {
|
|||
"ruleWithQueryInTemplate",
|
||||
expr,
|
||||
time.Minute,
|
||||
0,
|
||||
labels.FromStrings("label", "value"),
|
||||
labels.FromStrings("templated_label", `{{- with "sort(sum(http_requests) by (instance))" | query -}}
|
||||
{{- range $i,$v := . -}}
|
||||
|
@ -480,7 +491,7 @@ instance: {{ $v.Labels.instance }}, value: {{ printf "%.0f" $v.Value }};
|
|||
|
||||
func BenchmarkAlertingRuleAtomicField(b *testing.B) {
|
||||
b.ReportAllocs()
|
||||
rule := NewAlertingRule("bench", nil, 0, labels.EmptyLabels(), labels.EmptyLabels(), labels.EmptyLabels(), "", true, nil)
|
||||
rule := NewAlertingRule("bench", nil, 0, 0, labels.EmptyLabels(), labels.EmptyLabels(), labels.EmptyLabels(), "", true, nil)
|
||||
done := make(chan struct{})
|
||||
go func() {
|
||||
for i := 0; i < b.N; i++ {
|
||||
|
@ -518,6 +529,7 @@ func TestAlertingRuleDuplicate(t *testing.T) {
|
|||
"foo",
|
||||
expr,
|
||||
time.Minute,
|
||||
0,
|
||||
labels.FromStrings("test", "test"),
|
||||
labels.EmptyLabels(),
|
||||
labels.EmptyLabels(),
|
||||
|
@ -564,6 +576,7 @@ func TestAlertingRuleLimit(t *testing.T) {
|
|||
"foo",
|
||||
expr,
|
||||
time.Minute,
|
||||
0,
|
||||
labels.FromStrings("test", "test"),
|
||||
labels.EmptyLabels(),
|
||||
labels.EmptyLabels(),
|
||||
|
@ -636,6 +649,7 @@ func TestQueryForStateSeries(t *testing.T) {
|
|||
"TestRule",
|
||||
nil,
|
||||
time.Minute,
|
||||
0,
|
||||
labels.FromStrings("severity", "critical"),
|
||||
labels.EmptyLabels(), labels.EmptyLabels(), "", true, nil,
|
||||
)
|
||||
|
@ -669,6 +683,7 @@ func TestSendAlertsDontAffectActiveAlerts(t *testing.T) {
|
|||
"TestRule",
|
||||
nil,
|
||||
time.Minute,
|
||||
0,
|
||||
labels.FromStrings("severity", "critical"),
|
||||
labels.EmptyLabels(), labels.EmptyLabels(), "", true, nil,
|
||||
)
|
||||
|
@ -712,3 +727,209 @@ func TestSendAlertsDontAffectActiveAlerts(t *testing.T) {
|
|||
// But the labels with the AlertingRule should not be changed.
|
||||
require.Equal(t, labels.FromStrings("a1", "1"), rule.active[h].Labels)
|
||||
}
|
||||
|
||||
func TestKeepFiringFor(t *testing.T) {
|
||||
suite, err := promql.NewTest(t, `
|
||||
load 1m
|
||||
http_requests{job="app-server", instance="0"} 75 85 70 70 10x5
|
||||
`)
|
||||
require.NoError(t, err)
|
||||
defer suite.Close()
|
||||
|
||||
require.NoError(t, suite.Run())
|
||||
|
||||
expr, err := parser.ParseExpr(`http_requests > 50`)
|
||||
require.NoError(t, err)
|
||||
|
||||
rule := NewAlertingRule(
|
||||
"HTTPRequestRateHigh",
|
||||
expr,
|
||||
time.Minute,
|
||||
time.Minute,
|
||||
labels.EmptyLabels(),
|
||||
labels.EmptyLabels(), labels.EmptyLabels(), "", true, nil,
|
||||
)
|
||||
|
||||
results := []promql.Vector{
|
||||
{
|
||||
promql.Sample{
|
||||
Metric: labels.FromStrings(
|
||||
"__name__", "ALERTS",
|
||||
"alertname", "HTTPRequestRateHigh",
|
||||
"alertstate", "pending",
|
||||
"instance", "0",
|
||||
"job", "app-server",
|
||||
),
|
||||
Point: promql.Point{V: 1},
|
||||
},
|
||||
},
|
||||
{
|
||||
promql.Sample{
|
||||
Metric: labels.FromStrings(
|
||||
"__name__", "ALERTS",
|
||||
"alertname", "HTTPRequestRateHigh",
|
||||
"alertstate", "firing",
|
||||
"instance", "0",
|
||||
"job", "app-server",
|
||||
),
|
||||
Point: promql.Point{V: 1},
|
||||
},
|
||||
},
|
||||
{
|
||||
promql.Sample{
|
||||
Metric: labels.FromStrings(
|
||||
"__name__", "ALERTS",
|
||||
"alertname", "HTTPRequestRateHigh",
|
||||
"alertstate", "firing",
|
||||
"instance", "0",
|
||||
"job", "app-server",
|
||||
),
|
||||
Point: promql.Point{V: 1},
|
||||
},
|
||||
},
|
||||
{
|
||||
promql.Sample{
|
||||
Metric: labels.FromStrings(
|
||||
"__name__", "ALERTS",
|
||||
"alertname", "HTTPRequestRateHigh",
|
||||
"alertstate", "firing",
|
||||
"instance", "0",
|
||||
"job", "app-server",
|
||||
),
|
||||
Point: promql.Point{V: 1},
|
||||
},
|
||||
},
|
||||
// From now on the alert should keep firing.
|
||||
{
|
||||
promql.Sample{
|
||||
Metric: labels.FromStrings(
|
||||
"__name__", "ALERTS",
|
||||
"alertname", "HTTPRequestRateHigh",
|
||||
"alertstate", "firing",
|
||||
"instance", "0",
|
||||
"job", "app-server",
|
||||
),
|
||||
Point: promql.Point{V: 1},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
baseTime := time.Unix(0, 0)
|
||||
for i, result := range results {
|
||||
t.Logf("case %d", i)
|
||||
evalTime := baseTime.Add(time.Duration(i) * time.Minute)
|
||||
result[0].Point.T = timestamp.FromTime(evalTime)
|
||||
res, err := rule.Eval(suite.Context(), evalTime, EngineQueryFunc(suite.QueryEngine(), suite.Storage()), nil, 0)
|
||||
require.NoError(t, err)
|
||||
|
||||
var filteredRes promql.Vector // After removing 'ALERTS_FOR_STATE' samples.
|
||||
for _, smpl := range res {
|
||||
smplName := smpl.Metric.Get("__name__")
|
||||
if smplName == "ALERTS" {
|
||||
filteredRes = append(filteredRes, smpl)
|
||||
} else {
|
||||
// If not 'ALERTS', it has to be 'ALERTS_FOR_STATE'.
|
||||
require.Equal(t, "ALERTS_FOR_STATE", smplName)
|
||||
}
|
||||
}
|
||||
|
||||
require.Equal(t, result, filteredRes)
|
||||
}
|
||||
evalTime := baseTime.Add(time.Duration(len(results)) * time.Minute)
|
||||
res, err := rule.Eval(suite.Context(), evalTime, EngineQueryFunc(suite.QueryEngine(), suite.Storage()), nil, 0)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, len(res))
|
||||
}
|
||||
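For orientation, the evaluation timeline that the expected results above encode (a paraphrase of the test data, not additional behaviour):

// for: 1m, keep_firing_for: 1m, series: 75 85 70 70 10 10 ...
//	t=0m  75 > 50                               -> pending
//	t=1m  85 > 50, held for 1m                  -> firing
//	t=2m  70 > 50                               -> firing
//	t=3m  70 > 50                               -> firing
//	t=4m  10 <= 50, keep_firing_for window open -> still firing
//	t=5m  window elapsed                        -> resolved, no ALERTS sample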
|
||||
func TestPendingAndKeepFiringFor(t *testing.T) {
|
||||
suite, err := promql.NewTest(t, `
|
||||
load 1m
|
||||
http_requests{job="app-server", instance="0"} 75 10x10
|
||||
`)
|
||||
require.NoError(t, err)
|
||||
defer suite.Close()
|
||||
|
||||
require.NoError(t, suite.Run())
|
||||
|
||||
expr, err := parser.ParseExpr(`http_requests > 50`)
|
||||
require.NoError(t, err)
|
||||
|
||||
rule := NewAlertingRule(
|
||||
"HTTPRequestRateHigh",
|
||||
expr,
|
||||
time.Minute,
|
||||
time.Minute,
|
||||
labels.EmptyLabels(),
|
||||
labels.EmptyLabels(), labels.EmptyLabels(), "", true, nil,
|
||||
)
|
||||
|
||||
result := promql.Sample{
|
||||
Metric: labels.FromStrings(
|
||||
"__name__", "ALERTS",
|
||||
"alertname", "HTTPRequestRateHigh",
|
||||
"alertstate", "pending",
|
||||
"instance", "0",
|
||||
"job", "app-server",
|
||||
),
|
||||
Point: promql.Point{V: 1},
|
||||
}
|
||||
|
||||
baseTime := time.Unix(0, 0)
|
||||
result.Point.T = timestamp.FromTime(baseTime)
|
||||
res, err := rule.Eval(suite.Context(), baseTime, EngineQueryFunc(suite.QueryEngine(), suite.Storage()), nil, 0)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Len(t, res, 2)
|
||||
for _, smpl := range res {
|
||||
smplName := smpl.Metric.Get("__name__")
|
||||
if smplName == "ALERTS" {
|
||||
require.Equal(t, result, smpl)
|
||||
} else {
|
||||
// If not 'ALERTS', it has to be 'ALERTS_FOR_STATE'.
|
||||
require.Equal(t, "ALERTS_FOR_STATE", smplName)
|
||||
}
|
||||
}
|
||||
|
||||
evalTime := baseTime.Add(time.Minute)
|
||||
res, err = rule.Eval(suite.Context(), evalTime, EngineQueryFunc(suite.QueryEngine(), suite.Storage()), nil, 0)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, len(res))
|
||||
}
|
||||
|
||||
// TestAlertingEvalWithOrigin checks that the alerting rule details are passed through the context.
|
||||
func TestAlertingEvalWithOrigin(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
now := time.Now()
|
||||
|
||||
const (
|
||||
name = "my-alerting-rule"
|
||||
query = `count(metric{foo="bar"}) > 0`
|
||||
)
|
||||
var (
|
||||
detail RuleDetail
|
||||
lbs = labels.FromStrings("test", "test")
|
||||
)
|
||||
|
||||
expr, err := parser.ParseExpr(query)
|
||||
require.NoError(t, err)
|
||||
|
||||
rule := NewAlertingRule(
|
||||
name,
|
||||
expr,
|
||||
time.Second,
|
||||
time.Minute,
|
||||
lbs,
|
||||
nil,
|
||||
nil,
|
||||
"",
|
||||
true, log.NewNopLogger(),
|
||||
)
|
||||
|
||||
_, err = rule.Eval(ctx, now, func(ctx context.Context, qs string, _ time.Time) (promql.Vector, error) {
|
||||
detail = FromOriginContext(ctx)
|
||||
return nil, nil
|
||||
}, nil, 0)
|
||||
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, detail, NewRuleDetail(rule))
|
||||
}
|
||||
|
|
|
@ -1119,6 +1119,7 @@ func (m *Manager) LoadGroups(
|
|||
r.Alert.Value,
|
||||
expr,
|
||||
time.Duration(r.For),
|
||||
time.Duration(r.KeepFiringFor),
|
||||
labels.FromMap(r.Labels),
|
||||
labels.FromMap(r.Annotations),
|
||||
externalLabels,
|
||||
|
|
|
@ -38,8 +38,8 @@ import (
|
|||
"github.com/prometheus/prometheus/promql"
|
||||
"github.com/prometheus/prometheus/promql/parser"
|
||||
"github.com/prometheus/prometheus/storage"
|
||||
"github.com/prometheus/prometheus/tsdb"
|
||||
"github.com/prometheus/prometheus/tsdb/chunkenc"
|
||||
"github.com/prometheus/prometheus/tsdb/tsdbutil"
|
||||
"github.com/prometheus/prometheus/util/teststorage"
|
||||
)
|
||||
|
||||
|
@ -66,6 +66,7 @@ func TestAlertingRule(t *testing.T) {
|
|||
"HTTPRequestRateLow",
|
||||
expr,
|
||||
time.Minute,
|
||||
0,
|
||||
labels.FromStrings("severity", "{{\"c\"}}ritical"),
|
||||
labels.EmptyLabels(), labels.EmptyLabels(), "", true, nil,
|
||||
)
|
||||
|
@ -209,6 +210,7 @@ func TestForStateAddSamples(t *testing.T) {
|
|||
"HTTPRequestRateLow",
|
||||
expr,
|
||||
time.Minute,
|
||||
0,
|
||||
labels.FromStrings("severity", "{{\"c\"}}ritical"),
|
||||
labels.EmptyLabels(), labels.EmptyLabels(), "", true, nil,
|
||||
)
|
||||
|
@ -383,6 +385,7 @@ func TestForStateRestore(t *testing.T) {
|
|||
"HTTPRequestRateLow",
|
||||
expr,
|
||||
alertForDuration,
|
||||
0,
|
||||
labels.FromStrings("severity", "critical"),
|
||||
labels.EmptyLabels(), labels.EmptyLabels(), "", true, nil,
|
||||
)
|
||||
|
@ -449,6 +452,7 @@ func TestForStateRestore(t *testing.T) {
|
|||
"HTTPRequestRateLow",
|
||||
expr,
|
||||
alertForDuration,
|
||||
0,
|
||||
labels.FromStrings("severity", "critical"),
|
||||
labels.EmptyLabels(), labels.EmptyLabels(), "", false, nil,
|
||||
)
|
||||
|
@ -615,13 +619,13 @@ func readSeriesSet(ss storage.SeriesSet) (map[string][]promql.Point, error) {
|
|||
func TestCopyState(t *testing.T) {
|
||||
oldGroup := &Group{
|
||||
rules: []Rule{
|
||||
NewAlertingRule("alert", nil, 0, labels.EmptyLabels(), labels.EmptyLabels(), labels.EmptyLabels(), "", true, nil),
|
||||
NewAlertingRule("alert", nil, 0, 0, labels.EmptyLabels(), labels.EmptyLabels(), labels.EmptyLabels(), "", true, nil),
|
||||
NewRecordingRule("rule1", nil, labels.EmptyLabels()),
|
||||
NewRecordingRule("rule2", nil, labels.EmptyLabels()),
|
||||
NewRecordingRule("rule3", nil, labels.FromStrings("l1", "v1")),
|
||||
NewRecordingRule("rule3", nil, labels.FromStrings("l1", "v2")),
|
||||
NewRecordingRule("rule3", nil, labels.FromStrings("l1", "v3")),
|
||||
NewAlertingRule("alert2", nil, 0, labels.FromStrings("l2", "v1"), labels.EmptyLabels(), labels.EmptyLabels(), "", true, nil),
|
||||
NewAlertingRule("alert2", nil, 0, 0, labels.FromStrings("l2", "v1"), labels.EmptyLabels(), labels.EmptyLabels(), "", true, nil),
|
||||
},
|
||||
seriesInPreviousEval: []map[string]labels.Labels{
|
||||
{},
|
||||
|
@ -640,10 +644,10 @@ func TestCopyState(t *testing.T) {
|
|||
NewRecordingRule("rule3", nil, labels.FromStrings("l1", "v0")),
|
||||
NewRecordingRule("rule3", nil, labels.FromStrings("l1", "v1")),
|
||||
NewRecordingRule("rule3", nil, labels.FromStrings("l1", "v2")),
|
||||
NewAlertingRule("alert", nil, 0, labels.EmptyLabels(), labels.EmptyLabels(), labels.EmptyLabels(), "", true, nil),
|
||||
NewAlertingRule("alert", nil, 0, 0, labels.EmptyLabels(), labels.EmptyLabels(), labels.EmptyLabels(), "", true, nil),
|
||||
NewRecordingRule("rule1", nil, labels.EmptyLabels()),
|
||||
NewAlertingRule("alert2", nil, 0, labels.FromStrings("l2", "v0"), labels.EmptyLabels(), labels.EmptyLabels(), "", true, nil),
|
||||
NewAlertingRule("alert2", nil, 0, labels.FromStrings("l2", "v1"), labels.EmptyLabels(), labels.EmptyLabels(), "", true, nil),
|
||||
NewAlertingRule("alert2", nil, 0, 0, labels.FromStrings("l2", "v0"), labels.EmptyLabels(), labels.EmptyLabels(), "", true, nil),
|
||||
NewAlertingRule("alert2", nil, 0, 0, labels.FromStrings("l2", "v1"), labels.EmptyLabels(), labels.EmptyLabels(), "", true, nil),
|
||||
NewRecordingRule("rule4", nil, labels.EmptyLabels()),
|
||||
},
|
||||
seriesInPreviousEval: make([]map[string]labels.Labels, 8),
|
||||
|
@ -875,7 +879,7 @@ func TestNotify(t *testing.T) {
|
|||
|
||||
expr, err := parser.ParseExpr("a > 1")
|
||||
require.NoError(t, err)
|
||||
rule := NewAlertingRule("aTooHigh", expr, 0, labels.Labels{}, labels.Labels{}, labels.EmptyLabels(), "", true, log.NewNopLogger())
|
||||
rule := NewAlertingRule("aTooHigh", expr, 0, 0, labels.Labels{}, labels.Labels{}, labels.EmptyLabels(), "", true, log.NewNopLogger())
|
||||
group := NewGroup(GroupOptions{
|
||||
Name: "alert",
|
||||
Interval: time.Second,
|
||||
|
@ -1147,7 +1151,7 @@ func TestGroupHasAlertingRules(t *testing.T) {
|
|||
group: &Group{
|
||||
name: "HasAlertingRule",
|
||||
rules: []Rule{
|
||||
NewAlertingRule("alert", nil, 0, labels.EmptyLabels(), labels.EmptyLabels(), labels.EmptyLabels(), "", true, nil),
|
||||
NewAlertingRule("alert", nil, 0, 0, labels.EmptyLabels(), labels.EmptyLabels(), labels.EmptyLabels(), "", true, nil),
|
||||
NewRecordingRule("record", nil, labels.EmptyLabels()),
|
||||
},
|
||||
},
|
||||
|
@ -1344,7 +1348,7 @@ func TestNativeHistogramsInRecordingRules(t *testing.T) {
|
|||
|
||||
// Add some histograms.
|
||||
db := suite.TSDB()
|
||||
hists := tsdb.GenerateTestHistograms(5)
|
||||
hists := tsdbutil.GenerateTestHistograms(5)
|
||||
ts := time.Now()
|
||||
app := db.Appender(context.Background())
|
||||
for i, h := range hists {
|
||||
|
|
69
rules/origin.go
Normal file
|
@ -0,0 +1,69 @@
|
|||
// Copyright 2023 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package rules

import (
"context"
"fmt"

"github.com/prometheus/prometheus/model/labels"
)

type ruleOrigin struct{}

// RuleDetail contains information about the rule that is being evaluated.
type RuleDetail struct {
Name string
Query string
Labels labels.Labels
Kind string
}

const (
KindAlerting = "alerting"
KindRecording = "recording"
)

// NewRuleDetail creates a RuleDetail from a given Rule.
func NewRuleDetail(r Rule) RuleDetail {
var kind string
switch r.(type) {
case *AlertingRule:
kind = KindAlerting
case *RecordingRule:
kind = KindRecording
default:
panic(fmt.Sprintf(`unknown rule type "%T"`, r))
}

return RuleDetail{
Name: r.Name(),
Query: r.Query().String(),
Labels: r.Labels(),
Kind: kind,
}
}

// NewOriginContext returns a new context with data about the origin attached.
func NewOriginContext(ctx context.Context, rule RuleDetail) context.Context {
return context.WithValue(ctx, ruleOrigin{}, rule)
}

// FromOriginContext returns the RuleDetail origin data from the context.
func FromOriginContext(ctx context.Context) RuleDetail {
if rule, ok := ctx.Value(ruleOrigin{}).(RuleDetail); ok {
return rule
}
return RuleDetail{}
}
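For orientation, and not part of the commit itself: a minimal sketch of how a caller could consume this origin data from inside a QueryFunc. It relies only on the RuleDetail and FromOriginContext API shown above; the wrapper name is made up and the go-kit log.Logger import is an assumption based on its use elsewhere in this package.

// instrumentedQueryFunc is a hypothetical wrapper that logs the origin rule of
// every query before delegating to the wrapped QueryFunc.
func instrumentedQueryFunc(next QueryFunc, logger log.Logger) QueryFunc {
    return func(ctx context.Context, qs string, ts time.Time) (promql.Vector, error) {
        // FromOriginContext returns a zero-valued RuleDetail if no origin was attached.
        detail := FromOriginContext(ctx)
        _ = logger.Log("msg", "executing rule query", "kind", detail.Kind, "rule", detail.Name, "query", qs)
        return next(ctx, qs, ts)
    }
}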
51
rules/origin_test.go
Normal file
@ -0,0 +1,51 @@
// Copyright 2023 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package rules

import (
"context"
"net/url"
"testing"
"time"

"github.com/stretchr/testify/require"

"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/promql"
"github.com/prometheus/prometheus/promql/parser"
)

type unknownRule struct{}

func (u unknownRule) Name() string { return "" }
func (u unknownRule) Labels() labels.Labels { return nil }
func (u unknownRule) Eval(ctx context.Context, time time.Time, queryFunc QueryFunc, url *url.URL, i int) (promql.Vector, error) {
return nil, nil
}
func (u unknownRule) String() string { return "" }
func (u unknownRule) Query() parser.Expr { return nil }
func (u unknownRule) SetLastError(err error) {}
func (u unknownRule) LastError() error { return nil }
func (u unknownRule) SetHealth(health RuleHealth) {}
func (u unknownRule) Health() RuleHealth { return "" }
func (u unknownRule) SetEvaluationDuration(duration time.Duration) {}
func (u unknownRule) GetEvaluationDuration() time.Duration { return 0 }
func (u unknownRule) SetEvaluationTimestamp(time time.Time) {}
func (u unknownRule) GetEvaluationTimestamp() time.Time { return time.Time{} }

func TestNewRuleDetailPanics(t *testing.T) {
require.PanicsWithValue(t, `unknown rule type "rules.unknownRule"`, func() {
NewRuleDetail(unknownRule{})
})
}
@ -73,6 +73,8 @@ func (rule *RecordingRule) Labels() labels.Labels {
// Eval evaluates the rule and then overrides the metric names and labels accordingly.
func (rule *RecordingRule) Eval(ctx context.Context, ts time.Time, query QueryFunc, _ *url.URL, limit int) (promql.Vector, error) {
ctx = NewOriginContext(ctx, NewRuleDetail(rule))

vector, err := query(ctx, rule.vector.String(), ts)
if err != nil {
return nil, err
@ -156,3 +156,31 @@ func TestRecordingRuleLimit(t *testing.T) {
}
}
}

// TestRecordingEvalWithOrigin checks that the recording rule details are passed through the context.
func TestRecordingEvalWithOrigin(t *testing.T) {
ctx := context.Background()
now := time.Now()

const (
name = "my-recording-rule"
query = `count(metric{foo="bar"})`
)

var (
detail RuleDetail
lbs = labels.FromStrings("foo", "bar")
)

expr, err := parser.ParseExpr(query)
require.NoError(t, err)

rule := NewRecordingRule(name, expr, lbs)
_, err = rule.Eval(ctx, now, func(ctx context.Context, qs string, _ time.Time) (promql.Vector, error) {
detail = FromOriginContext(ctx)
return nil, nil
}, nil, 0)

require.NoError(t, err)
require.Equal(t, detail, NewRuleDetail(rule))
}
@ -440,6 +440,10 @@ type chainSampleIterator struct {
curr chunkenc.Iterator
lastT int64

// Whether the previous and the current sample are direct neighbors
// within the same base iterator.
consecutive bool
}

// Return a chainSampleIterator initialized for length entries, re-using the memory from it if possible.
@ -485,6 +489,9 @@ func (c *chainSampleIterator) Seek(t int64) chunkenc.ValueType {
if c.curr != nil && c.lastT >= t {
return c.curr.Seek(c.lastT)
}
// Don't bother to find out if the next sample is consecutive. Callers
// of Seek usually aren't interested anyway.
c.consecutive = false
c.h = samplesIteratorHeap{}
for _, iter := range c.iterators {
if iter.Seek(t) != chunkenc.ValNone {
@ -511,14 +518,30 @@ func (c *chainSampleIterator) AtHistogram() (int64, *histogram.Histogram) {
if c.curr == nil {
panic("chainSampleIterator.AtHistogram called before first .Next or after .Next returned false.")
}
return c.curr.AtHistogram()
t, h := c.curr.AtHistogram()
// If the current sample is not consecutive with the previous one, we
// cannot be sure anymore that there was no counter reset.
if !c.consecutive && h.CounterResetHint == histogram.NotCounterReset {
h.CounterResetHint = histogram.UnknownCounterReset
}
return t, h
}

func (c *chainSampleIterator) AtFloatHistogram() (int64, *histogram.FloatHistogram) {
if c.curr == nil {
panic("chainSampleIterator.AtFloatHistogram called before first .Next or after .Next returned false.")
}
return c.curr.AtFloatHistogram()
t, fh := c.curr.AtFloatHistogram()
// If the current sample is not consecutive with the previous one, we
// cannot be sure anymore about counter resets for counter histograms.
// TODO(beorn7): If a `NotCounterReset` sample is followed by a
// non-consecutive `CounterReset` sample, we could keep the hint as
// `CounterReset`. But then we needed to track the previous sample
// in more detail, which might not be worth it.
if !c.consecutive && fh.CounterResetHint != histogram.GaugeType {
fh.CounterResetHint = histogram.UnknownCounterReset
}
return t, fh
}

func (c *chainSampleIterator) AtT() int64 {
@ -529,7 +552,13 @@ func (c *chainSampleIterator) AtT() int64 {
}

func (c *chainSampleIterator) Next() chunkenc.ValueType {
var (
currT int64
currValueType chunkenc.ValueType
iteratorChanged bool
)
if c.h == nil {
iteratorChanged = true
c.h = samplesIteratorHeap{}
// We call c.curr.Next() as the first thing below.
// So, we don't call Next() on it here.
@ -545,8 +574,6 @@ func (c *chainSampleIterator) Next() chunkenc.ValueType {
return chunkenc.ValNone
}

var currT int64
var currValueType chunkenc.ValueType
for {
currValueType = c.curr.Next()
if currValueType != chunkenc.ValNone {
@ -576,6 +603,7 @@ func (c *chainSampleIterator) Next() chunkenc.ValueType {
}

c.curr = heap.Pop(&c.h).(chunkenc.Iterator)
iteratorChanged = true
currT = c.curr.AtT()
currValueType = c.curr.Seek(currT)
if currT != c.lastT {
@ -583,6 +611,7 @@ func (c *chainSampleIterator) Next() chunkenc.ValueType {
}
}

c.consecutive = !iteratorChanged
c.lastT = currT
return currValueType
}
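Read together, the new consecutive flag boils down to one downgrade rule. The following standalone restatement is for illustration only; the helper name is made up and does not exist in the codebase:

// downgradeCounterResetHint mirrors the checks added to AtHistogram and
// AtFloatHistogram above: once the merged iterator has switched to a sample
// from a different base iterator, a "not counter reset" hint can no longer be
// trusted and is weakened to "unknown".
func downgradeCounterResetHint(consecutive bool, hint histogram.CounterResetHint) histogram.CounterResetHint {
    if !consecutive && hint == histogram.NotCounterReset {
        return histogram.UnknownCounterReset
    }
    return hint
}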
@ -23,7 +23,6 @@ import (
"github.com/stretchr/testify/require"

"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/tsdb/chunkenc"
"github.com/prometheus/prometheus/tsdb/tsdbutil"
@ -387,18 +386,11 @@ func TestCompactingChunkSeriesMerger(t *testing.T) {
// histogramSample returns a histogram that is unique to the ts.
histogramSample := func(ts int64) sample {
idx := ts + 1
return sample{t: ts, h: &histogram.Histogram{
Schema: 2,
ZeroThreshold: 0.001,
ZeroCount: 2 * uint64(idx),
Count: 5 * uint64(idx),
Sum: 12.34 * float64(idx),
PositiveSpans: []histogram.Span{{Offset: 1, Length: 2}, {Offset: 2, Length: 1}},
NegativeSpans: []histogram.Span{{Offset: 2, Length: 1}, {Offset: 1, Length: 2}},
PositiveBuckets: []int64{1 * idx, -1 * idx, 3 * idx},
NegativeBuckets: []int64{1 * idx, 2 * idx, 3 * idx},
}}
return sample{t: ts, h: tsdbutil.GenerateTestHistogram(int(ts + 1))}
}

floatHistogramSample := func(ts int64) sample {
return sample{t: ts, fh: tsdbutil.GenerateTestFloatHistogram(int(ts + 1))}
}

for _, tc := range []struct {
@ -529,6 +521,46 @@ func TestCompactingChunkSeriesMerger(t *testing.T) {
[]tsdbutil.Sample{histogramSample(15)},
),
},
{
name: "float histogram chunks overlapping",
input: []ChunkSeries{
NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{floatHistogramSample(0), floatHistogramSample(5)}, []tsdbutil.Sample{floatHistogramSample(10), floatHistogramSample(15)}),
NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{floatHistogramSample(2), floatHistogramSample(20)}, []tsdbutil.Sample{floatHistogramSample(25), floatHistogramSample(30)}),
NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{floatHistogramSample(18), floatHistogramSample(26)}, []tsdbutil.Sample{floatHistogramSample(31), floatHistogramSample(35)}),
},
expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"),
[]tsdbutil.Sample{floatHistogramSample(0), floatHistogramSample(2), floatHistogramSample(5), floatHistogramSample(10), floatHistogramSample(15), floatHistogramSample(18), floatHistogramSample(20), floatHistogramSample(25), floatHistogramSample(26), floatHistogramSample(30)},
[]tsdbutil.Sample{floatHistogramSample(31), floatHistogramSample(35)},
),
},
{
name: "float histogram chunks overlapping with float chunks",
input: []ChunkSeries{
NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{floatHistogramSample(0), floatHistogramSample(5)}, []tsdbutil.Sample{floatHistogramSample(10), floatHistogramSample(15)}),
NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{1, 1, nil, nil}, sample{12, 12, nil, nil}}, []tsdbutil.Sample{sample{14, 14, nil, nil}}),
},
expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"),
[]tsdbutil.Sample{floatHistogramSample(0)},
[]tsdbutil.Sample{sample{1, 1, nil, nil}},
[]tsdbutil.Sample{floatHistogramSample(5), floatHistogramSample(10)},
[]tsdbutil.Sample{sample{12, 12, nil, nil}, sample{14, 14, nil, nil}},
[]tsdbutil.Sample{floatHistogramSample(15)},
),
},
{
name: "float histogram chunks overlapping with histogram chunks",
input: []ChunkSeries{
NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{floatHistogramSample(0), floatHistogramSample(5)}, []tsdbutil.Sample{floatHistogramSample(10), floatHistogramSample(15)}),
NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{histogramSample(1), histogramSample(12)}, []tsdbutil.Sample{histogramSample(14)}),
},
expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"),
[]tsdbutil.Sample{floatHistogramSample(0)},
[]tsdbutil.Sample{histogramSample(1)},
[]tsdbutil.Sample{floatHistogramSample(5), floatHistogramSample(10)},
[]tsdbutil.Sample{histogramSample(12), histogramSample(14)},
[]tsdbutil.Sample{floatHistogramSample(15)},
),
},
} {
t.Run(tc.name, func(t *testing.T) {
merged := m(tc.input...)
@ -87,7 +87,7 @@ func TestStoreHTTPErrorHandling(t *testing.T) {
func TestClientRetryAfter(t *testing.T) {
server := httptest.NewServer(
http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
http.Error(w, longErrMessage, 429)
http.Error(w, longErrMessage, http.StatusTooManyRequests)
}),
)
defer server.Close()
@ -559,7 +559,7 @@ func HistogramProtoToFloatHistogram(hp prompb.Histogram) *histogram.FloatHistogr
}
}

func spansProtoToSpans(s []*prompb.BucketSpan) []histogram.Span {
func spansProtoToSpans(s []prompb.BucketSpan) []histogram.Span {
spans := make([]histogram.Span, len(s))
for i := 0; i < len(s); i++ {
spans[i] = histogram.Span{Offset: s[i].Offset, Length: s[i].Length}
@ -600,10 +600,10 @@ func FloatHistogramToHistogramProto(timestamp int64, fh *histogram.FloatHistogra
}
}

func spansToSpansProto(s []histogram.Span) []*prompb.BucketSpan {
spans := make([]*prompb.BucketSpan, len(s))
func spansToSpansProto(s []histogram.Span) []prompb.BucketSpan {
spans := make([]prompb.BucketSpan, len(s))
for i := 0; i < len(s); i++ {
spans[i] = &prompb.BucketSpan{Offset: s[i].Offset, Length: s[i].Length}
spans[i] = prompb.BucketSpan{Offset: s[i].Offset, Length: s[i].Length}
}

return spans
@ -399,7 +399,7 @@ func exampleHistogramProto() prompb.Histogram {
Schema: 0,
ZeroThreshold: 0,
ZeroCount: &prompb.Histogram_ZeroCountInt{ZeroCountInt: 0},
NegativeSpans: []*prompb.BucketSpan{
NegativeSpans: []prompb.BucketSpan{
{
Offset: 0,
Length: 5,
@ -414,7 +414,7 @@ func exampleHistogramProto() prompb.Histogram {
},
},
NegativeDeltas: []int64{1, 2, -2, 1, -1, 0},
PositiveSpans: []*prompb.BucketSpan{
PositiveSpans: []prompb.BucketSpan{
{
Offset: 0,
Length: 4,
@ -497,7 +497,7 @@ func exampleFloatHistogramProto() prompb.Histogram {
Schema: 0,
ZeroThreshold: 0,
ZeroCount: &prompb.Histogram_ZeroCountFloat{ZeroCountFloat: 0},
NegativeSpans: []*prompb.BucketSpan{
NegativeSpans: []prompb.BucketSpan{
{
Offset: 0,
Length: 5,
@ -512,7 +512,7 @@ func exampleFloatHistogramProto() prompb.Histogram {
},
},
NegativeCounts: []float64{1, 2, -2, 1, -1, 0},
PositiveSpans: []*prompb.BucketSpan{
PositiveSpans: []prompb.BucketSpan{
{
Offset: 0,
Length: 4,
@ -54,10 +54,10 @@ func TestDB_InvalidSeries(t *testing.T) {
})

t.Run("Histograms", func(t *testing.T) {
_, err := app.AppendHistogram(0, labels.Labels{}, 0, tsdb.GenerateTestHistograms(1)[0], nil)
_, err := app.AppendHistogram(0, labels.Labels{}, 0, tsdbutil.GenerateTestHistograms(1)[0], nil)
require.ErrorIs(t, err, tsdb.ErrInvalidSample, "should reject empty labels")

_, err = app.AppendHistogram(0, labels.FromStrings("a", "1", "a", "2"), 0, tsdb.GenerateTestHistograms(1)[0], nil)
_, err = app.AppendHistogram(0, labels.FromStrings("a", "1", "a", "2"), 0, tsdbutil.GenerateTestHistograms(1)[0], nil)
require.ErrorIs(t, err, tsdb.ErrInvalidSample, "should reject duplicate labels")
})

@ -151,7 +151,7 @@ func TestCommit(t *testing.T) {
for _, l := range lbls {
lset := labels.New(l...)
histograms := tsdb.GenerateTestHistograms(numHistograms)
histograms := tsdbutil.GenerateTestHistograms(numHistograms)
for i := 0; i < numHistograms; i++ {
_, err := app.AppendHistogram(0, lset, int64(i), histograms[i], nil)
@ -163,7 +163,7 @@ func TestCommit(t *testing.T) {
for _, l := range lbls {
lset := labels.New(l...)
floatHistograms := tsdb.GenerateTestFloatHistograms(numHistograms)
floatHistograms := tsdbutil.GenerateTestFloatHistograms(numHistograms)
for i := 0; i < numHistograms; i++ {
_, err := app.AppendHistogram(0, lset, int64(i), nil, floatHistograms[i])
@ -257,7 +257,7 @@ func TestRollback(t *testing.T) {
for _, l := range lbls {
lset := labels.New(l...)
histograms := tsdb.GenerateTestHistograms(numHistograms)
histograms := tsdbutil.GenerateTestHistograms(numHistograms)
for i := 0; i < numHistograms; i++ {
_, err := app.AppendHistogram(0, lset, int64(i), histograms[i], nil)
@ -269,7 +269,7 @@ func TestRollback(t *testing.T) {
for _, l := range lbls {
lset := labels.New(l...)
floatHistograms := tsdb.GenerateTestFloatHistograms(numHistograms)
floatHistograms := tsdbutil.GenerateTestFloatHistograms(numHistograms)
for i := 0; i < numHistograms; i++ {
_, err := app.AppendHistogram(0, lset, int64(i), nil, floatHistograms[i])
@ -374,7 +374,7 @@ func TestFullTruncateWAL(t *testing.T) {
for _, l := range lbls {
lset := labels.New(l...)
histograms := tsdb.GenerateTestHistograms(numHistograms)
histograms := tsdbutil.GenerateTestHistograms(numHistograms)
for i := 0; i < numHistograms; i++ {
_, err := app.AppendHistogram(0, lset, int64(lastTs), histograms[i], nil)
@ -387,7 +387,7 @@ func TestFullTruncateWAL(t *testing.T) {
for _, l := range lbls {
lset := labels.New(l...)
floatHistograms := tsdb.GenerateTestFloatHistograms(numHistograms)
floatHistograms := tsdbutil.GenerateTestFloatHistograms(numHistograms)
for i := 0; i < numHistograms; i++ {
_, err := app.AppendHistogram(0, lset, int64(lastTs), nil, floatHistograms[i])
@ -436,7 +436,7 @@ func TestPartialTruncateWAL(t *testing.T) {
for _, l := range lbls {
lset := labels.New(l...)
histograms := tsdb.GenerateTestHistograms(numDatapoints)
histograms := tsdbutil.GenerateTestHistograms(numDatapoints)
for i := 0; i < numDatapoints; i++ {
_, err := app.AppendHistogram(0, lset, lastTs, histograms[i], nil)
@ -449,7 +449,7 @@ func TestPartialTruncateWAL(t *testing.T) {
for _, l := range lbls {
lset := labels.New(l...)
floatHistograms := tsdb.GenerateTestFloatHistograms(numDatapoints)
floatHistograms := tsdbutil.GenerateTestFloatHistograms(numDatapoints)
for i := 0; i < numDatapoints; i++ {
_, err := app.AppendHistogram(0, lset, lastTs, nil, floatHistograms[i])
@ -475,7 +475,7 @@ func TestPartialTruncateWAL(t *testing.T) {
for _, l := range lbls {
lset := labels.New(l...)
histograms := tsdb.GenerateTestHistograms(numDatapoints)
histograms := tsdbutil.GenerateTestHistograms(numDatapoints)
for i := 0; i < numDatapoints; i++ {
_, err := app.AppendHistogram(0, lset, lastTs, histograms[i], nil)
@ -488,7 +488,7 @@ func TestPartialTruncateWAL(t *testing.T) {
for _, l := range lbls {
lset := labels.New(l...)
floatHistograms := tsdb.GenerateTestFloatHistograms(numDatapoints)
floatHistograms := tsdbutil.GenerateTestFloatHistograms(numDatapoints)
for i := 0; i < numDatapoints; i++ {
_, err := app.AppendHistogram(0, lset, lastTs, nil, floatHistograms[i])
@ -529,7 +529,7 @@ func TestWALReplay(t *testing.T) {
for _, l := range lbls {
lset := labels.New(l...)
histograms := tsdb.GenerateTestHistograms(numHistograms)
histograms := tsdbutil.GenerateTestHistograms(numHistograms)
for i := 0; i < numHistograms; i++ {
_, err := app.AppendHistogram(0, lset, lastTs, histograms[i], nil)
@ -541,7 +541,7 @@ func TestWALReplay(t *testing.T) {
for _, l := range lbls {
lset := labels.New(l...)
floatHistograms := tsdb.GenerateTestFloatHistograms(numHistograms)
floatHistograms := tsdbutil.GenerateTestFloatHistograms(numHistograms)
for i := 0; i < numHistograms; i++ {
_, err := app.AppendHistogram(0, lset, lastTs, nil, floatHistograms[i])
@ -622,7 +622,7 @@ func Test_ExistingWAL_NextRef(t *testing.T) {
}

histogramCount := 10
histograms := tsdb.GenerateTestHistograms(histogramCount)
histograms := tsdbutil.GenerateTestHistograms(histogramCount)
// Append <histogramCount> series
for i := 0; i < histogramCount; i++ {
lset := labels.FromStrings(model.MetricNameLabel, fmt.Sprintf("histogram_%d", i))
@ -632,6 +632,16 @@ func genHistogramSeries(totalSeries, labelCount int, mint, maxt, step int64, flo
},
PositiveBuckets: []int64{int64(ts + 1), 1, -1, 0},
}
if ts != mint {
// By setting the counter reset hint to "no counter
// reset" for all histograms but the first, we cover the
// most common cases. If the series is manipulated later
// or spans more than one block when ingested into the
// storage, the hint has to be adjusted. Note that the
// storage itself treats this particular hint the same
// as "unknown".
h.CounterResetHint = histogram.NotCounterReset
}
if floatHistogram {
return sample{t: ts, fh: h.ToFloat()}
}
@ -661,6 +671,13 @@ func genHistogramAndFloatSeries(totalSeries, labelCount int, mint, maxt, step in
},
PositiveBuckets: []int64{int64(ts + 1), 1, -1, 0},
}
if count > 1 && count%5 != 1 {
// Same rationale for this as above in
// genHistogramSeries, just that we have to be
// smarter to find out if the previous sample
// was a histogram, too.
h.CounterResetHint = histogram.NotCounterReset
}
if floatHistogram {
s = sample{t: ts, fh: h.ToFloat()}
} else {
@ -217,10 +217,10 @@ func (a *FloatHistogramAppender) AppendHistogram(int64, *histogram.Histogram) {
panic("appended an integer histogram to a float histogram chunk")
}

// Appendable returns whether the chunk can be appended to, and if so
// whether any recoding needs to happen using the provided interjections
// (in case of any new buckets, positive or negative range, respectively).
// If the sample is a gauge histogram, AppendableGauge must be used instead.
// Appendable returns whether the chunk can be appended to, and if so whether
// any recoding needs to happen using the provided inserts (in case of any new
// buckets, positive or negative range, respectively). If the sample is a gauge
// histogram, AppendableGauge must be used instead.
//
// The chunk is not appendable in the following cases:
// - The schema has changed.
@ -233,7 +233,7 @@ func (a *FloatHistogramAppender) AppendHistogram(int64, *histogram.Histogram) {
// because of a counter reset. If the given sample is stale, it is always ok to
// append. If counterReset is true, okToAppend is always false.
func (a *FloatHistogramAppender) Appendable(h *histogram.FloatHistogram) (
positiveInterjections, negativeInterjections []Interjection,
positiveInserts, negativeInserts []Insert,
okToAppend, counterReset bool,
) {
if a.NumSamples() > 0 && a.GetCounterResetHeader() == GaugeType {
@ -267,12 +267,12 @@ func (a *FloatHistogramAppender) Appendable(h *histogram.FloatHistogram) (
}

var ok bool
positiveInterjections, ok = forwardCompareSpans(a.pSpans, h.PositiveSpans)
positiveInserts, ok = expandSpansForward(a.pSpans, h.PositiveSpans)
if !ok {
counterReset = true
return
}
negativeInterjections, ok = forwardCompareSpans(a.nSpans, h.NegativeSpans)
negativeInserts, ok = expandSpansForward(a.nSpans, h.NegativeSpans)
if !ok {
counterReset = true
return
@ -280,7 +280,7 @@ func (a *FloatHistogramAppender) Appendable(h *histogram.FloatHistogram) (

if counterResetInAnyFloatBucket(a.pBuckets, h.PositiveBuckets, a.pSpans, h.PositiveSpans) ||
counterResetInAnyFloatBucket(a.nBuckets, h.NegativeBuckets, a.nSpans, h.NegativeSpans) {
counterReset, positiveInterjections, negativeInterjections = true, nil, nil
counterReset, positiveInserts, negativeInserts = true, nil, nil
return
}

@ -290,10 +290,11 @@ func (a *FloatHistogramAppender) Appendable(h *histogram.FloatHistogram) (

// AppendableGauge returns whether the chunk can be appended to, and if so
// whether:
// 1. Any recoding needs to happen to the chunk using the provided interjections
// 1. Any recoding needs to happen to the chunk using the provided inserts
// (in case of any new buckets, positive or negative range, respectively).
// 2. Any recoding needs to happen for the histogram being appended, using the backward interjections
// (in case of any missing buckets, positive or negative range, respectively).
// 2. Any recoding needs to happen for the histogram being appended, using the
// backward inserts (in case of any missing buckets, positive or negative
// range, respectively).
//
// This method must be only used for gauge histograms.
//
@ -302,8 +303,8 @@ func (a *FloatHistogramAppender) Appendable(h *histogram.FloatHistogram) (
// - The threshold for the zero bucket has changed.
// - The last sample in the chunk was stale while the current sample is not stale.
func (a *FloatHistogramAppender) AppendableGauge(h *histogram.FloatHistogram) (
positiveInterjections, negativeInterjections []Interjection,
backwardPositiveInterjections, backwardNegativeInterjections []Interjection,
positiveInserts, negativeInserts []Insert,
backwardPositiveInserts, backwardNegativeInserts []Insert,
positiveSpans, negativeSpans []histogram.Span,
okToAppend bool,
) {
@ -325,8 +326,8 @@ func (a *FloatHistogramAppender) AppendableGauge(h *histogram.FloatHistogram) (
return
}

positiveInterjections, backwardPositiveInterjections, positiveSpans = bidirectionalCompareSpans(a.pSpans, h.PositiveSpans)
negativeInterjections, backwardNegativeInterjections, negativeSpans = bidirectionalCompareSpans(a.nSpans, h.NegativeSpans)
positiveInserts, backwardPositiveInserts, positiveSpans = expandSpansBothWays(a.pSpans, h.PositiveSpans)
negativeInserts, backwardNegativeInserts, negativeSpans = expandSpansBothWays(a.nSpans, h.NegativeSpans)
okToAppend = true
return
}
@ -501,12 +502,12 @@ func (a *FloatHistogramAppender) writeXorValue(old *xorValue, v float64) {
}

// Recode converts the current chunk to accommodate an expansion of the set of
// (positive and/or negative) buckets used, according to the provided
// interjections, resulting in the honoring of the provided new positive and
// negative spans. To continue appending, use the returned Appender rather than
// the receiver of this method.
// (positive and/or negative) buckets used, according to the provided inserts,
// resulting in the honoring of the provided new positive and negative spans. To
// continue appending, use the returned Appender rather than the receiver of
// this method.
func (a *FloatHistogramAppender) Recode(
positiveInterjections, negativeInterjections []Interjection,
positiveInserts, negativeInserts []Insert,
positiveSpans, negativeSpans []histogram.Span,
) (Chunk, Appender) {
// TODO(beorn7): This currently just decodes everything and then encodes
@ -539,11 +540,11 @@ func (a *FloatHistogramAppender) Recode(

// Save the modified histogram to the new chunk.
hOld.PositiveSpans, hOld.NegativeSpans = positiveSpans, negativeSpans
if len(positiveInterjections) > 0 {
hOld.PositiveBuckets = interject(hOld.PositiveBuckets, positiveBuckets, positiveInterjections, false)
if len(positiveInserts) > 0 {
hOld.PositiveBuckets = insert(hOld.PositiveBuckets, positiveBuckets, positiveInserts, false)
}
if len(negativeInterjections) > 0 {
hOld.NegativeBuckets = interject(hOld.NegativeBuckets, negativeBuckets, negativeInterjections, false)
if len(negativeInserts) > 0 {
hOld.NegativeBuckets = insert(hOld.NegativeBuckets, negativeBuckets, negativeInserts, false)
}
app.AppendFloatHistogram(tOld, hOld)
}
@ -556,15 +557,15 @@ func (a *FloatHistogramAppender) Recode(
// (positive and/or negative) buckets used.
func (a *FloatHistogramAppender) RecodeHistogramm(
fh *histogram.FloatHistogram,
pBackwardInter, nBackwardInter []Interjection,
pBackwardInter, nBackwardInter []Insert,
) {
if len(pBackwardInter) > 0 {
numPositiveBuckets := countSpans(fh.PositiveSpans)
fh.PositiveBuckets = interject(fh.PositiveBuckets, make([]float64, numPositiveBuckets), pBackwardInter, false)
fh.PositiveBuckets = insert(fh.PositiveBuckets, make([]float64, numPositiveBuckets), pBackwardInter, false)
}
if len(nBackwardInter) > 0 {
numNegativeBuckets := countSpans(fh.NegativeSpans)
fh.NegativeBuckets = interject(fh.NegativeBuckets, make([]float64, numNegativeBuckets), nBackwardInter, false)
fh.NegativeBuckets = insert(fh.NegativeBuckets, make([]float64, numNegativeBuckets), nBackwardInter, false)
}
}

@ -627,12 +628,8 @@ func (it *floatHistogramIterator) AtFloatHistogram() (int64, *histogram.FloatHis
return it.t, &histogram.FloatHistogram{Sum: it.sum.value}
}
it.atFloatHistogramCalled = true
crHint := histogram.UnknownCounterReset
if it.counterResetHeader == GaugeType {
crHint = histogram.GaugeType
}
return it.t, &histogram.FloatHistogram{
CounterResetHint: crHint,
CounterResetHint: counterResetHint(it.counterResetHeader, it.numRead),
Count: it.cnt.value,
ZeroCount: it.zCnt.value,
Sum: it.sum.value,
@ -66,7 +66,9 @@ func TestFloatHistogramChunkSameBuckets(t *testing.T) {
h.PositiveBuckets = []int64{5, -2, 1, -2} // counts: 5, 3, 4, 2 (total 14)
h.NegativeBuckets = []int64{4, -1, 1, -1} // counts: 4, 3, 4, 4 (total 15)
app.AppendFloatHistogram(ts, h.ToFloat())
exp = append(exp, floatResult{t: ts, h: h.ToFloat()})
expH := h.ToFloat()
expH.CounterResetHint = histogram.NotCounterReset
exp = append(exp, floatResult{t: ts, h: expH})
require.Equal(t, 2, c.NumSamples())

// Add update with new appender.
@ -81,7 +83,9 @@ func TestFloatHistogramChunkSameBuckets(t *testing.T) {
h.PositiveBuckets = []int64{6, 1, -3, 6} // counts: 6, 7, 4, 10 (total 27)
h.NegativeBuckets = []int64{5, 1, -2, 3} // counts: 5, 6, 4, 7 (total 22)
app.AppendFloatHistogram(ts, h.ToFloat())
exp = append(exp, floatResult{t: ts, h: h.ToFloat()})
expH = h.ToFloat()
expH.CounterResetHint = histogram.NotCounterReset
exp = append(exp, floatResult{t: ts, h: expH})
require.Equal(t, 3, c.NumSamples())

// 1. Expand iterator in simple case.
@ -138,7 +142,7 @@ func TestFloatHistogramChunkSameBuckets(t *testing.T) {
require.Equal(t, ValNone, it4.Seek(exp[len(exp)-1].t+1))
}

// Mimics the scenario described for compareSpans().
// Mimics the scenario described for expandSpansForward.
func TestFloatHistogramChunkBucketChanges(t *testing.T) {
c := Chunk(NewFloatHistogramChunk())

@ -209,9 +213,11 @@ func TestFloatHistogramChunkBucketChanges(t *testing.T) {
h1.PositiveBuckets = []int64{6, -3, -3, 3, -3, 0, 2, 2, 1, -5, 1}
h1.NegativeSpans = h2.NegativeSpans
h1.NegativeBuckets = []int64{0, 1}
expH2 := h2.ToFloat()
expH2.CounterResetHint = histogram.NotCounterReset
exp := []floatResult{
{t: ts1, h: h1.ToFloat()},
{t: ts2, h: h2.ToFloat()},
{t: ts2, h: expH2},
}
it := c.Iterator(nil)
var act []floatResult
@ -243,10 +243,10 @@ func (a *HistogramAppender) AppendFloatHistogram(int64, *histogram.FloatHistogra
panic("appended a float histogram to a histogram chunk")
}

// Appendable returns whether the chunk can be appended to, and if so
// whether any recoding needs to happen using the provided interjections
// (in case of any new buckets, positive or negative range, respectively).
// If the sample is a gauge histogram, AppendableGauge must be used instead.
// Appendable returns whether the chunk can be appended to, and if so whether
// any recoding needs to happen using the provided inserts (in case of any new
// buckets, positive or negative range, respectively). If the sample is a gauge
// histogram, AppendableGauge must be used instead.
//
// The chunk is not appendable in the following cases:
//
@ -261,7 +261,7 @@ func (a *HistogramAppender) AppendFloatHistogram(int64, *histogram.FloatHistogra
// because of a counter reset. If the given sample is stale, it is always ok to
// append. If counterReset is true, okToAppend is always false.
func (a *HistogramAppender) Appendable(h *histogram.Histogram) (
positiveInterjections, negativeInterjections []Interjection,
positiveInserts, negativeInserts []Insert,
okToAppend, counterReset bool,
) {
if a.NumSamples() > 0 && a.GetCounterResetHeader() == GaugeType {
@ -295,12 +295,12 @@ func (a *HistogramAppender) Appendable(h *histogram.Histogram) (
}

var ok bool
positiveInterjections, ok = forwardCompareSpans(a.pSpans, h.PositiveSpans)
positiveInserts, ok = expandSpansForward(a.pSpans, h.PositiveSpans)
if !ok {
counterReset = true
return
}
negativeInterjections, ok = forwardCompareSpans(a.nSpans, h.NegativeSpans)
negativeInserts, ok = expandSpansForward(a.nSpans, h.NegativeSpans)
if !ok {
counterReset = true
return
@ -308,7 +308,7 @@ func (a *HistogramAppender) Appendable(h *histogram.Histogram) (

if counterResetInAnyBucket(a.pBuckets, h.PositiveBuckets, a.pSpans, h.PositiveSpans) ||
counterResetInAnyBucket(a.nBuckets, h.NegativeBuckets, a.nSpans, h.NegativeSpans) {
counterReset, positiveInterjections, negativeInterjections = true, nil, nil
counterReset, positiveInserts, negativeInserts = true, nil, nil
return
}

@ -318,10 +318,11 @@ func (a *HistogramAppender) Appendable(h *histogram.Histogram) (

// AppendableGauge returns whether the chunk can be appended to, and if so
// whether:
// 1. Any recoding needs to happen to the chunk using the provided interjections
// 1. Any recoding needs to happen to the chunk using the provided inserts
// (in case of any new buckets, positive or negative range, respectively).
// 2. Any recoding needs to happen for the histogram being appended, using the backward interjections
// (in case of any missing buckets, positive or negative range, respectively).
// 2. Any recoding needs to happen for the histogram being appended, using the
// backward inserts (in case of any missing buckets, positive or negative
// range, respectively).
//
// This method must be only used for gauge histograms.
//
@ -330,8 +331,8 @@ func (a *HistogramAppender) Appendable(h *histogram.Histogram) (
// - The threshold for the zero bucket has changed.
// - The last sample in the chunk was stale while the current sample is not stale.
func (a *HistogramAppender) AppendableGauge(h *histogram.Histogram) (
positiveInterjections, negativeInterjections []Interjection,
backwardPositiveInterjections, backwardNegativeInterjections []Interjection,
positiveInserts, negativeInserts []Insert,
backwardPositiveInserts, backwardNegativeInserts []Insert,
positiveSpans, negativeSpans []histogram.Span,
okToAppend bool,
) {
@ -353,8 +354,8 @@ func (a *HistogramAppender) AppendableGauge(h *histogram.Histogram) (
return
}

positiveInterjections, backwardPositiveInterjections, positiveSpans = bidirectionalCompareSpans(a.pSpans, h.PositiveSpans)
negativeInterjections, backwardNegativeInterjections, negativeSpans = bidirectionalCompareSpans(a.nSpans, h.NegativeSpans)
positiveInserts, backwardPositiveInserts, positiveSpans = expandSpansBothWays(a.pSpans, h.PositiveSpans)
negativeInserts, backwardNegativeInserts, negativeSpans = expandSpansBothWays(a.nSpans, h.NegativeSpans)
okToAppend = true
return
}
@ -488,8 +489,9 @@ func (a *HistogramAppender) AppendHistogram(t int64, h *histogram.Histogram) {
putVarbitInt(a.b, b)
}
} else {
// The case for the 2nd sample with single deltas is implicitly handled correctly with the double delta code,
// so we don't need a separate single delta logic for the 2nd sample.
// The case for the 2nd sample with single deltas is implicitly
// handled correctly with the double delta code, so we don't
// need a separate single delta logic for the 2nd sample.

tDelta = t - a.t
cntDelta = int64(h.Count) - int64(a.cnt)
@ -539,12 +541,12 @@ func (a *HistogramAppender) AppendHistogram(t int64, h *histogram.Histogram) {
}

// Recode converts the current chunk to accommodate an expansion of the set of
// (positive and/or negative) buckets used, according to the provided
// interjections, resulting in the honoring of the provided new positive and
// negative spans. To continue appending, use the returned Appender rather than
// the receiver of this method.
// (positive and/or negative) buckets used, according to the provided inserts,
// resulting in the honoring of the provided new positive and negative spans. To
// continue appending, use the returned Appender rather than the receiver of
// this method.
func (a *HistogramAppender) Recode(
positiveInterjections, negativeInterjections []Interjection,
positiveInserts, negativeInserts []Insert,
positiveSpans, negativeSpans []histogram.Span,
) (Chunk, Appender) {
// TODO(beorn7): This currently just decodes everything and then encodes
@ -577,11 +579,11 @@ func (a *HistogramAppender) Recode(

// Save the modified histogram to the new chunk.
hOld.PositiveSpans, hOld.NegativeSpans = positiveSpans, negativeSpans
if len(positiveInterjections) > 0 {
hOld.PositiveBuckets = interject(hOld.PositiveBuckets, positiveBuckets, positiveInterjections, true)
if len(positiveInserts) > 0 {
hOld.PositiveBuckets = insert(hOld.PositiveBuckets, positiveBuckets, positiveInserts, true)
}
if len(negativeInterjections) > 0 {
hOld.NegativeBuckets = interject(hOld.NegativeBuckets, negativeBuckets, negativeInterjections, true)
if len(negativeInserts) > 0 {
hOld.NegativeBuckets = insert(hOld.NegativeBuckets, negativeBuckets, negativeInserts, true)
}
app.AppendHistogram(tOld, hOld)
}
@ -590,19 +592,19 @@ func (a *HistogramAppender) Recode(
return hc, app
}

// RecodeHistogram converts the current histogram (in-place) to accommodate an expansion of the set of
// (positive and/or negative) buckets used.
// RecodeHistogram converts the current histogram (in-place) to accommodate an
// expansion of the set of (positive and/or negative) buckets used.
func (a *HistogramAppender) RecodeHistogram(
h *histogram.Histogram,
pBackwardInter, nBackwardInter []Interjection,
pBackwardInserts, nBackwardInserts []Insert,
) {
if len(pBackwardInter) > 0 {
if len(pBackwardInserts) > 0 {
numPositiveBuckets := countSpans(h.PositiveSpans)
h.PositiveBuckets = interject(h.PositiveBuckets, make([]int64, numPositiveBuckets), pBackwardInter, true)
h.PositiveBuckets = insert(h.PositiveBuckets, make([]int64, numPositiveBuckets), pBackwardInserts, true)
}
if len(nBackwardInter) > 0 {
if len(nBackwardInserts) > 0 {
numNegativeBuckets := countSpans(h.NegativeSpans)
h.NegativeBuckets = interject(h.NegativeBuckets, make([]int64, numNegativeBuckets), nBackwardInter, true)
h.NegativeBuckets = insert(h.NegativeBuckets, make([]int64, numNegativeBuckets), nBackwardInserts, true)
}
}

@ -665,12 +667,8 @@ func (it *histogramIterator) AtHistogram() (int64, *histogram.Histogram) {
return it.t, &histogram.Histogram{Sum: it.sum}
}
it.atHistogramCalled = true
crHint := histogram.UnknownCounterReset
if it.counterResetHeader == GaugeType {
crHint = histogram.GaugeType
}
return it.t, &histogram.Histogram{
CounterResetHint: crHint,
CounterResetHint: counterResetHint(it.counterResetHeader, it.numRead),
Count: it.cnt,
ZeroCount: it.zCnt,
Sum: it.sum,
@ -688,12 +686,8 @@ func (it *histogramIterator) AtFloatHistogram() (int64, *histogram.FloatHistogra
return it.t, &histogram.FloatHistogram{Sum: it.sum}
}
it.atFloatHistogramCalled = true
crHint := histogram.UnknownCounterReset
if it.counterResetHeader == GaugeType {
crHint = histogram.GaugeType
}
return it.t, &histogram.FloatHistogram{
CounterResetHint: crHint,
CounterResetHint: counterResetHint(it.counterResetHeader, it.numRead),
Count: float64(it.cnt),
ZeroCount: float64(it.zCnt),
Sum: it.sum,
@ -19,7 +19,10 @@ import (
"github.com/prometheus/prometheus/model/histogram"
)

func writeHistogramChunkLayout(b *bstream, schema int32, zeroThreshold float64, positiveSpans, negativeSpans []histogram.Span) {
func writeHistogramChunkLayout(
b *bstream, schema int32, zeroThreshold float64,
positiveSpans, negativeSpans []histogram.Span,
) {
putZeroThreshold(b, zeroThreshold)
putVarbitInt(b, int64(schema))
putHistogramChunkLayoutSpans(b, positiveSpans)
@ -91,9 +94,7 @@ func readHistogramChunkLayoutSpans(b *bstreamReader) ([]histogram.Span, error) {

// putZeroThreshold writes the zero threshold to the bstream. It stores typical
// values in just one byte, but needs 9 bytes for other values. In detail:
//
// * If the threshold is 0, store a single zero byte.
//
// - If the threshold is 0, store a single zero byte.
// - If the threshold is a power of 2 between (and including) 2^-243 and 2^10,
// take the exponent from the IEEE 754 representation of the threshold, which
// covers a range between (and including) -242 and 11. (2^-243 is 0.5*2^-242
@ -103,7 +104,6 @@ func readHistogramChunkLayoutSpans(b *bstreamReader) ([]histogram.Span, error) {
// threshold. The default value for the zero threshold is 2^-128 (or
// 0.5*2^-127 in IEEE 754 representation) and will therefore be encoded as a
// single byte (with value 116).
//
// - In all other cases, store 255 as a single byte, followed by the 8 bytes of
// the threshold as a float64, i.e. taking 9 bytes in total.
func putZeroThreshold(b *bstream, threshold float64) {
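The body of putZeroThreshold is not part of this hunk. The following is a sketch reconstructed purely from the comment above; the function name, the use of math.Frexp, and the big-endian byte order are assumptions, not the actual implementation:

// putZeroThresholdSketch illustrates the documented encoding: 0 becomes a
// single 0x00 byte, an exact power of two between 2^-243 and 2^10 becomes one
// byte derived from its IEEE 754 exponent (e.g. the default 2^-128 = 0.5*2^-127
// encodes as -127+243 = 116), and everything else becomes 0xFF followed by the
// raw float64 bits (endianness assumed here).
func putZeroThresholdSketch(threshold float64) []byte {
    if threshold == 0 {
        return []byte{0}
    }
    frac, exp := math.Frexp(threshold) // threshold = frac * 2^exp with 0.5 <= frac < 1.
    if frac == 0.5 && exp >= -242 && exp <= 11 {
        return []byte{byte(exp + 243)} // Yields 1..254, leaving 0 and 255 as markers.
    }
    buf := []byte{255}
    return binary.BigEndian.AppendUint64(buf, math.Float64bits(threshold))
}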
@ -186,16 +186,16 @@ func (b *bucketIterator) Next() (int, bool) {
return 0, false
}

// An Interjection describes how many new buckets have to be introduced before
// processing the pos'th delta from the original slice.
type Interjection struct {
// An Insert describes how many new buckets have to be inserted before
// processing the pos'th bucket from the original slice.
type Insert struct {
pos int
num int
}

// forwardCompareSpans returns the interjections to convert a slice of deltas to a new
// slice representing an expanded set of buckets, or false if incompatible
// (e.g. if buckets were removed).
// expandSpansForward returns the inserts to expand the bucket spans 'a' so that
// they match the spans in 'b'. 'b' must cover the same or more buckets than
// 'a', otherwise the function will return false.
//
// Example:
//
@ -222,25 +222,25 @@ type Interjection struct {
// deltas 6 -3 -3 3 -3 0 2 2 1 -5 1
// delta mods: / \ / \ / \
//
// Note that whenever any new buckets are introduced, the subsequent "old"
// bucket needs to readjust its delta to the new base of 0. Thus, for the caller
// who wants to transform the set of original deltas to a new set of deltas to
// match a new span layout that adds buckets, we simply need to generate a list
// of interjections.
// Note for histograms with delta-encoded buckets: Whenever any new buckets are
// introduced, the subsequent "old" bucket needs to readjust its delta to the
// new base of 0. Thus, for the caller who wants to transform the set of
// original deltas to a new set of deltas to match a new span layout that adds
// buckets, we simply need to generate a list of inserts.
//
// Note: Within forwardCompareSpans we don't have to worry about the changes to the
// Note: Within expandSpansForward we don't have to worry about the changes to the
// spans themselves, thanks to the iterators we get to work with the more useful
// bucket indices (which of course directly correspond to the buckets we have to
// adjust).
func forwardCompareSpans(a, b []histogram.Span) (forward []Interjection, ok bool) {
func expandSpansForward(a, b []histogram.Span) (forward []Insert, ok bool) {
ai := newBucketIterator(a)
bi := newBucketIterator(b)

var interjections []Interjection
var inserts []Insert

// When inter.num becomes > 0, this becomes a valid interjection that
// should be yielded when we finish a streak of new buckets.
var inter Interjection
// When inter.num becomes > 0, this becomes a valid insert that should
// be yielded when we finish a streak of new buckets.
var inter Insert

av, aOK := ai.Next()
bv, bOK := bi.Next()
@ -250,43 +250,46 @@ loop:
case aOK && bOK:
switch {
case av == bv: // Both have an identical value. move on!
// Finish WIP interjection and reset.
// Finish WIP insert and reset.
if inter.num > 0 {
interjections = append(interjections, inter)
inserts = append(inserts, inter)
}
inter.num = 0
av, aOK = ai.Next()
bv, bOK = bi.Next()
inter.pos++
case av < bv: // b misses a value that is in a.
return interjections, false
return inserts, false
case av > bv: // a misses a value that is in b. Forward b and recompare.
inter.num++
bv, bOK = bi.Next()
}
case aOK && !bOK: // b misses a value that is in a.
return interjections, false
return inserts, false
case !aOK && bOK: // a misses a value that is in b. Forward b and recompare.
inter.num++
bv, bOK = bi.Next()
default: // Both iterators ran out. We're done.
if inter.num > 0 {
interjections = append(interjections, inter)
inserts = append(inserts, inter)
}
break loop
}
}

return interjections, true
return inserts, true
}
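A concrete worked example of the two renamed helpers (an in-package sketch; the numbers are invented for illustration only):

// The chunk has two buckets (span {Offset: 0, Length: 2}) with delta-encoded
// values [5, -2], i.e. absolute counts 5 and 3. A new histogram arrives with
// three buckets (span {Offset: 0, Length: 3}).
a := []histogram.Span{{Offset: 0, Length: 2}}
b := []histogram.Span{{Offset: 0, Length: 3}}
inserts, ok := expandSpansForward(a, b)
// ok == true, inserts == []Insert{{pos: 2, num: 1}}: one new bucket has to be
// inserted after the two existing ones.
out := insert([]int64{5, -2}, make([]int64, 3), inserts, true)
// out == []int64{5, -2, -3}: the trailing delta of -3 gives the new bucket an
// absolute count of 0 (counts 5, 3, 0), so the recoded chunk keeps its meaning.
_, _ = ok, out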
|
||||
// bidirectionalCompareSpans does everything that forwardCompareSpans does and
|
||||
// also returns interjections in the other direction (i.e. buckets missing in b that are missing in a).
|
||||
func bidirectionalCompareSpans(a, b []histogram.Span) (forward, backward []Interjection, mergedSpans []histogram.Span) {
|
||||
// expandSpansBothWays is similar to expandSpansForward, but now b may also
|
||||
// cover an entirely different set of buckets. The function returns the
|
||||
// “forward” inserts to expand 'a' to also cover all the buckets exclusively
|
||||
// covered by 'b', and it returns the “backward” inserts to expand 'b' to also
|
||||
// cover all the buckets exclusively covered by 'a'
|
||||
func expandSpansBothWays(a, b []histogram.Span) (forward, backward []Insert, mergedSpans []histogram.Span) {
|
||||
ai := newBucketIterator(a)
|
||||
bi := newBucketIterator(b)
|
||||
|
||||
var interjections, bInterjections []Interjection
|
||||
var fInserts, bInserts []Insert
|
||||
var lastBucket int
|
||||
addBucket := func(b int) {
|
||||
offset := b - lastBucket - 1
|
||||
|
@ -305,9 +308,10 @@ func bidirectionalCompareSpans(a, b []histogram.Span) (forward, backward []Inter
|
|||
lastBucket = b
|
||||
}
|
||||
|
||||
// When inter.num becomes > 0, this becomes a valid interjection that
|
||||
// should be yielded when we finish a streak of new buckets.
|
||||
var inter, bInter Interjection
|
||||
// When fInter.num (or bInter.num, respectively) becomes > 0, this
|
||||
// becomes a valid insert that should be yielded when we finish a streak
|
||||
// of new buckets.
|
||||
var fInter, bInter Insert
|
||||
|
||||
av, aOK := ai.Next()
|
||||
bv, bOK := bi.Next()
|
||||
|
@ -317,37 +321,37 @@ loop:
|
|||
case aOK && bOK:
|
||||
switch {
|
||||
case av == bv: // Both have an identical value. move on!
|
||||
// Finish WIP interjection and reset.
|
||||
if inter.num > 0 {
|
||||
interjections = append(interjections, inter)
|
||||
inter.num = 0
|
||||
// Finish WIP insert and reset.
|
||||
if fInter.num > 0 {
|
||||
fInserts = append(fInserts, fInter)
|
||||
fInter.num = 0
|
||||
}
|
||||
if bInter.num > 0 {
|
||||
bInterjections = append(bInterjections, bInter)
|
||||
bInserts = append(bInserts, bInter)
|
||||
bInter.num = 0
|
||||
}
|
||||
addBucket(av)
|
||||
av, aOK = ai.Next()
|
||||
bv, bOK = bi.Next()
|
||||
inter.pos++
|
||||
fInter.pos++
|
||||
bInter.pos++
|
||||
case av < bv: // b misses a value that is in a.
|
||||
bInter.num++
|
||||
// Collect the forward interjection before advancing the
|
||||
// position of 'a'.
|
||||
if inter.num > 0 {
|
||||
interjections = append(interjections, inter)
|
||||
inter.num = 0
|
||||
// Collect the forward inserts before advancing
|
||||
// the position of 'a'.
|
||||
if fInter.num > 0 {
|
||||
fInserts = append(fInserts, fInter)
|
||||
fInter.num = 0
|
||||
}
|
||||
addBucket(av)
|
||||
inter.pos++
|
||||
fInter.pos++
|
||||
av, aOK = ai.Next()
|
||||
case av > bv: // a misses a value that is in b. Forward b and recompare.
|
||||
inter.num++
|
||||
// Collect the backward interjection before advancing the
|
||||
fInter.num++
|
||||
// Collect the backward inserts before advancing the
|
||||
// position of 'b'.
|
||||
if bInter.num > 0 {
|
||||
bInterjections = append(bInterjections, bInter)
|
||||
bInserts = append(bInserts, bInter)
|
||||
bInter.num = 0
|
||||
}
|
||||
addBucket(bv)
|
||||
|
@ -359,92 +363,127 @@ loop:
|
|||
addBucket(av)
|
||||
av, aOK = ai.Next()
|
||||
case !aOK && bOK: // a misses a value that is in b. Forward b and recompare.
|
||||
inter.num++
|
||||
fInter.num++
|
||||
addBucket(bv)
|
||||
bv, bOK = bi.Next()
|
||||
default: // Both iterators ran out. We're done.
|
||||
if inter.num > 0 {
|
||||
interjections = append(interjections, inter)
|
||||
if fInter.num > 0 {
|
||||
fInserts = append(fInserts, fInter)
|
||||
}
|
||||
if bInter.num > 0 {
|
||||
bInterjections = append(bInterjections, bInter)
|
||||
bInserts = append(bInserts, bInter)
|
||||
}
|
||||
break loop
|
||||
}
|
||||
}
|
||||
|
||||
return interjections, bInterjections, mergedSpans
|
||||
return fInserts, bInserts, mergedSpans
|
||||
}
|
||||
|
||||
type bucketValue interface {
|
||||
int64 | float64
|
||||
}
|
||||
|
||||
// interject merges 'in' with the provided interjections and writes them into
|
||||
// 'out', which must already have the appropriate length.
|
||||
func interject[BV bucketValue](in, out []BV, interjections []Interjection, deltas bool) []BV {
|
||||
// insert merges 'in' with the provided inserts and writes them into 'out',
|
||||
// which must already have the appropriate length. 'out' is also returned for
|
||||
// convenience.
|
||||
func insert[BV bucketValue](in, out []BV, inserts []Insert, deltas bool) []BV {
|
||||
var (
|
||||
j int // Position in out.
|
||||
v BV // The last value seen.
|
||||
interj int // The next interjection to process.
|
||||
oi int // Position in out.
|
||||
v BV // The last value seen.
|
||||
ii int // The next insert to process.
|
||||
)
|
||||
for i, d := range in {
|
||||
if interj < len(interjections) && i == interjections[interj].pos {
|
||||
|
||||
// We have an interjection!
|
||||
// Add interjection.num new delta values such that their bucket values equate 0.
|
||||
// When deltas==false, it means that it is an absolute value. So we set it to 0 directly.
|
||||
if ii < len(inserts) && i == inserts[ii].pos {
|
||||
// We have an insert!
|
||||
// Add insert.num new delta values such that their
|
||||
// bucket values equate 0. When deltas==false, it means
|
||||
// that it is an absolute value. So we set it to 0
|
||||
// directly.
|
||||
if deltas {
|
||||
out[j] = -v
|
||||
out[oi] = -v
|
||||
} else {
|
||||
out[j] = 0
|
||||
out[oi] = 0
|
||||
}
|
||||
j++
|
||||
for x := 1; x < interjections[interj].num; x++ {
|
||||
out[j] = 0
|
||||
j++
|
||||
oi++
|
||||
for x := 1; x < inserts[ii].num; x++ {
|
||||
out[oi] = 0
|
||||
oi++
|
||||
}
|
||||
interj++
|
||||
ii++
|
||||
|
||||
// Now save the value from the input. The delta value we
|
||||
// should save is the original delta value + the last
|
||||
// value of the point before the interjection (to undo
|
||||
// the delta that was introduced by the interjection).
|
||||
// When deltas==false, it means that it is an absolute value,
|
||||
// value of the point before the insert (to undo the
|
||||
// delta that was introduced by the insert). When
|
||||
// deltas==false, it means that it is an absolute value,
|
||||
// so we set it directly to the value in the 'in' slice.
|
||||
if deltas {
|
||||
out[j] = d + v
|
||||
out[oi] = d + v
|
||||
} else {
|
||||
out[j] = d
|
||||
out[oi] = d
|
||||
}
|
||||
j++
|
||||
oi++
|
||||
v = d + v
|
||||
continue
|
||||
}
|
||||
|
||||
// If there was no interjection, the original delta is still
|
||||
// valid.
|
||||
out[j] = d
|
||||
j++
|
||||
// If there was no insert, the original delta is still valid.
|
||||
out[oi] = d
|
||||
oi++
|
||||
v += d
|
||||
}
|
||||
switch interj {
|
||||
case len(interjections):
|
||||
// All interjections processed. Nothing more to do.
|
||||
case len(interjections) - 1:
|
||||
// One more interjection to process at the end.
|
||||
switch ii {
|
||||
case len(inserts):
|
||||
// All inserts processed. Nothing more to do.
|
||||
case len(inserts) - 1:
|
||||
// One more insert to process at the end.
|
||||
if deltas {
|
||||
out[j] = -v
|
||||
out[oi] = -v
|
||||
} else {
|
||||
out[j] = 0
|
||||
out[oi] = 0
|
||||
}
|
||||
j++
|
||||
for x := 1; x < interjections[interj].num; x++ {
|
||||
out[j] = 0
|
||||
j++
|
||||
oi++
|
||||
for x := 1; x < inserts[ii].num; x++ {
|
||||
out[oi] = 0
|
||||
oi++
|
||||
}
|
||||
default:
|
||||
panic("unprocessed interjections left")
|
||||
panic("unprocessed inserts left")
|
||||
}
|
||||
return out
|
||||
}
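
As a sanity check of the delta bookkeeping above, here is a minimal standalone sketch. It is not the chunkenc implementation and its names are made up; it splices zero-valued buckets into a delta-encoded slice while keeping the absolute values of the original buckets intact, and it ignores the trailing-insert case handled by the switch at the end of insert.

package main

import "fmt"

type ins struct{ pos, num int }

// spliceZeros inserts ins.num zero-valued buckets before position ins.pos of
// the delta-encoded slice 'in'.
func spliceZeros(in []int64, inserts []ins) []int64 {
	var (
		out []int64
		v   int64 // Running absolute value of the previous bucket.
		ii  int   // Next insert to process.
	)
	for i, d := range in {
		if ii < len(inserts) && i == inserts[ii].pos {
			out = append(out, -v) // First new bucket: delta down to zero.
			for x := 1; x < inserts[ii].num; x++ {
				out = append(out, 0) // Further new buckets stay at zero.
			}
			out = append(out, d+v) // Undo the detour so the original value is restored.
			v += d
			ii++
			continue
		}
		out = append(out, d)
		v += d
	}
	return out
}

func main() {
	// Absolute buckets 6,3,3,2 with two empty buckets spliced in before index 2
	// become 6,3,0,0,3,2.
	fmt.Println(spliceZeros([]int64{6, -3, 0, -1}, []ins{{pos: 2, num: 2}}))
	// Output: [6 -3 -3 0 3 -1]
}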
|
||||
|
||||
// counterResetHint returns a CounterResetHint based on the CounterResetHeader
|
||||
// and on the position into the chunk.
|
||||
func counterResetHint(crh CounterResetHeader, numRead uint16) histogram.CounterResetHint {
|
||||
switch {
|
||||
case crh == GaugeType:
|
||||
// A gauge histogram chunk only contains gauge histograms.
|
||||
return histogram.GaugeType
|
||||
case numRead > 1:
|
||||
// In a counter histogram chunk, there will not be any counter
|
||||
// resets after the first histogram.
|
||||
return histogram.NotCounterReset
|
||||
case crh == CounterReset:
|
||||
// If the chunk was started because of a counter reset, we can
|
||||
// safely return that hint. This histogram always has to be
|
||||
// treated as a counter reset.
|
||||
return histogram.CounterReset
|
||||
default:
|
||||
// Sadly, we have to return "unknown" as the hint for all other
|
||||
// cases, even if we know that the chunk was started without a
|
||||
// counter reset. But we cannot be sure that the previous chunk
|
||||
// still exists in the TSDB, so we conservatively return
|
||||
// "unknown". On the bright side, this case should be relatively
|
||||
// rare.
|
||||
//
|
||||
// TODO(beorn7): Nevertheless, if the current chunk is in the
|
||||
// middle of a block (not the first chunk in the block for this
|
||||
// series), it's probably safe to assume that the previous chunk
|
||||
// will exist in the TSDB for as long as the current chunk
|
||||
// exist, and we could safely return
|
||||
// "histogram.NotCounterReset". This needs some more work and
|
||||
// might not be worth the effort and/or risk. To be vetted...
|
||||
return histogram.UnknownCounterReset
|
||||
}
|
||||
}
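
A worked example of the precedence encoded in this switch, written as a standalone sketch with made-up identifiers rather than the real chunkenc and histogram constants: the gauge header wins over everything, then the sample position, then the counter reset header.

package main

import "fmt"

type chunkHeader int

const (
	unknownHeader chunkHeader = iota
	counterResetHeader
	gaugeHeader
)

func hintFor(h chunkHeader, numRead uint16) string {
	switch {
	case h == gaugeHeader:
		return "GaugeType"
	case numRead > 1:
		return "NotCounterReset"
	case h == counterResetHeader:
		return "CounterReset"
	default:
		return "UnknownCounterReset"
	}
}

func main() {
	fmt.Println(hintFor(counterResetHeader, 1)) // CounterReset
	fmt.Println(hintFor(counterResetHeader, 5)) // NotCounterReset: only the first sample carries the reset.
	fmt.Println(hintFor(unknownHeader, 1))      // UnknownCounterReset: the previous chunk may be gone.
	fmt.Println(hintFor(gaugeHeader, 5))        // GaugeType
}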
|
||||
|
|
|
@ -78,7 +78,7 @@ func TestBucketIterator(t *testing.T) {
|
|||
},
|
||||
idxs: []int{100, 101, 102, 103, 112, 113, 114, 115, 116, 117, 118, 119},
|
||||
},
|
||||
// The below 2 sets are the ones described in compareSpans's comments.
|
||||
// The below 2 sets are the ones described in expandSpansForward's comments.
|
||||
{
|
||||
spans: []histogram.Span{
|
||||
{Offset: 0, Length: 2},
|
||||
|
@ -111,12 +111,12 @@ func TestBucketIterator(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestCompareSpansAndInterject(t *testing.T) {
|
||||
func TestCompareSpansAndInsert(t *testing.T) {
|
||||
scenarios := []struct {
|
||||
description string
|
||||
spansA, spansB []histogram.Span
|
||||
interjections, backwardInterjections []Interjection
|
||||
bucketsIn, bucketsOut []int64
|
||||
description string
|
||||
spansA, spansB []histogram.Span
|
||||
fInserts, bInserts []Insert
|
||||
bucketsIn, bucketsOut []int64
|
||||
}{
|
||||
{
|
||||
description: "single prepend at the beginning",
|
||||
|
@ -126,7 +126,7 @@ func TestCompareSpansAndInterject(t *testing.T) {
|
|||
spansB: []histogram.Span{
|
||||
{Offset: -11, Length: 4},
|
||||
},
|
||||
interjections: []Interjection{
|
||||
fInserts: []Insert{
|
||||
{
|
||||
pos: 0,
|
||||
num: 1,
|
||||
|
@ -143,7 +143,7 @@ func TestCompareSpansAndInterject(t *testing.T) {
|
|||
spansB: []histogram.Span{
|
||||
{Offset: -10, Length: 4},
|
||||
},
|
||||
interjections: []Interjection{
|
||||
fInserts: []Insert{
|
||||
{
|
||||
pos: 3,
|
||||
num: 1,
|
||||
|
@ -160,7 +160,7 @@ func TestCompareSpansAndInterject(t *testing.T) {
|
|||
spansB: []histogram.Span{
|
||||
{Offset: -12, Length: 5},
|
||||
},
|
||||
interjections: []Interjection{
|
||||
fInserts: []Insert{
|
||||
{
|
||||
pos: 0,
|
||||
num: 2,
|
||||
|
@ -177,7 +177,7 @@ func TestCompareSpansAndInterject(t *testing.T) {
|
|||
spansB: []histogram.Span{
|
||||
{Offset: -10, Length: 5},
|
||||
},
|
||||
interjections: []Interjection{
|
||||
fInserts: []Insert{
|
||||
{
|
||||
pos: 3,
|
||||
num: 2,
|
||||
|
@ -194,7 +194,7 @@ func TestCompareSpansAndInterject(t *testing.T) {
|
|||
spansB: []histogram.Span{
|
||||
{Offset: -12, Length: 7},
|
||||
},
|
||||
interjections: []Interjection{
|
||||
fInserts: []Insert{
|
||||
{
|
||||
pos: 0,
|
||||
num: 2,
|
||||
|
@ -215,7 +215,7 @@ func TestCompareSpansAndInterject(t *testing.T) {
|
|||
spansB: []histogram.Span{
|
||||
{Offset: -9, Length: 3},
|
||||
},
|
||||
backwardInterjections: []Interjection{
|
||||
bInserts: []Insert{
|
||||
{pos: 0, num: 1},
|
||||
},
|
||||
},
|
||||
|
@ -228,7 +228,7 @@ func TestCompareSpansAndInterject(t *testing.T) {
|
|||
{Offset: -10, Length: 2},
|
||||
{Offset: 1, Length: 1},
|
||||
},
|
||||
backwardInterjections: []Interjection{
|
||||
bInserts: []Insert{
|
||||
{pos: 2, num: 1},
|
||||
},
|
||||
},
|
||||
|
@ -240,7 +240,7 @@ func TestCompareSpansAndInterject(t *testing.T) {
|
|||
spansB: []histogram.Span{
|
||||
{Offset: -10, Length: 3},
|
||||
},
|
||||
backwardInterjections: []Interjection{
|
||||
bInserts: []Insert{
|
||||
{pos: 3, num: 1},
|
||||
},
|
||||
},
|
||||
|
@ -259,7 +259,7 @@ func TestCompareSpansAndInterject(t *testing.T) {
|
|||
{Offset: 1, Length: 4},
|
||||
{Offset: 3, Length: 3},
|
||||
},
|
||||
interjections: []Interjection{
|
||||
fInserts: []Insert{
|
||||
{
|
||||
pos: 2,
|
||||
num: 1,
|
||||
|
@ -277,7 +277,7 @@ func TestCompareSpansAndInterject(t *testing.T) {
|
|||
bucketsOut: []int64{6, -3, -3, 3, -3, 0, 2, 2, 1, -5, 1},
|
||||
},
|
||||
{
|
||||
description: "both forward and backward interjections, complex case",
|
||||
description: "both forward and backward inserts, complex case",
|
||||
spansA: []histogram.Span{
|
||||
{Offset: 0, Length: 2},
|
||||
{Offset: 2, Length: 1},
|
||||
|
@ -292,7 +292,7 @@ func TestCompareSpansAndInterject(t *testing.T) {
|
|||
{Offset: 1, Length: 1},
|
||||
{Offset: 4, Length: 1},
|
||||
},
|
||||
interjections: []Interjection{
|
||||
fInserts: []Insert{
|
||||
{
|
||||
pos: 2,
|
||||
num: 1,
|
||||
|
@ -306,7 +306,7 @@ func TestCompareSpansAndInterject(t *testing.T) {
|
|||
num: 1,
|
||||
},
|
||||
},
|
||||
backwardInterjections: []Interjection{
|
||||
bInserts: []Insert{
|
||||
{
|
||||
pos: 0,
|
||||
num: 1,
|
||||
|
@ -329,22 +329,22 @@ func TestCompareSpansAndInterject(t *testing.T) {
|
|||
|
||||
for _, s := range scenarios {
|
||||
t.Run(s.description, func(t *testing.T) {
|
||||
if len(s.backwardInterjections) > 0 {
|
||||
interjections, bInterjections, _ := bidirectionalCompareSpans(s.spansA, s.spansB)
|
||||
require.Equal(t, s.interjections, interjections)
|
||||
require.Equal(t, s.backwardInterjections, bInterjections)
|
||||
if len(s.bInserts) > 0 {
|
||||
fInserts, bInserts, _ := expandSpansBothWays(s.spansA, s.spansB)
|
||||
require.Equal(t, s.fInserts, fInserts)
|
||||
require.Equal(t, s.bInserts, bInserts)
|
||||
}
|
||||
|
||||
interjections, valid := forwardCompareSpans(s.spansA, s.spansB)
|
||||
if len(s.backwardInterjections) > 0 {
|
||||
inserts, valid := expandSpansForward(s.spansA, s.spansB)
|
||||
if len(s.bInserts) > 0 {
|
||||
require.False(t, valid, "compareScan unexpectedly returned true")
|
||||
return
|
||||
}
|
||||
require.True(t, valid, "compareScan unexpectedly returned false")
|
||||
require.Equal(t, s.interjections, interjections)
|
||||
require.Equal(t, s.fInserts, inserts)
|
||||
|
||||
gotBuckets := make([]int64, len(s.bucketsOut))
|
||||
interject(s.bucketsIn, gotBuckets, interjections, true)
|
||||
insert(s.bucketsIn, gotBuckets, inserts, true)
|
||||
require.Equal(t, s.bucketsOut, gotBuckets)
|
||||
|
||||
floatBucketsIn := make([]float64, len(s.bucketsIn))
|
||||
|
@ -362,7 +362,7 @@ func TestCompareSpansAndInterject(t *testing.T) {
|
|||
floatBucketsOut[i] = float64(last)
|
||||
}
|
||||
gotFloatBuckets := make([]float64, len(floatBucketsOut))
|
||||
interject(floatBucketsIn, gotFloatBuckets, interjections, false)
|
||||
insert(floatBucketsIn, gotFloatBuckets, inserts, false)
|
||||
require.Equal(t, floatBucketsOut, gotFloatBuckets)
|
||||
})
|
||||
}
|
||||
|
@ -564,12 +564,12 @@ func TestSpansFromBidirectionalCompareSpans(t *testing.T) {
|
|||
copy(s1c, c.s1)
|
||||
copy(s2c, c.s2)
|
||||
|
||||
_, _, act := bidirectionalCompareSpans(c.s1, c.s2)
|
||||
_, _, act := expandSpansBothWays(c.s1, c.s2)
|
||||
require.Equal(t, c.exp, act)
|
||||
// Check that s1 and s2 are not modified.
|
||||
require.Equal(t, s1c, c.s1)
|
||||
require.Equal(t, s2c, c.s2)
|
||||
_, _, act = bidirectionalCompareSpans(c.s2, c.s1)
|
||||
_, _, act = expandSpansBothWays(c.s2, c.s1)
|
||||
require.Equal(t, c.exp, act)
|
||||
}
|
||||
}
|
||||
|
|
|
@ -67,7 +67,9 @@ func TestHistogramChunkSameBuckets(t *testing.T) {
|
|||
h.PositiveBuckets = []int64{5, -2, 1, -2} // counts: 5, 3, 4, 2 (total 14)
|
||||
h.NegativeBuckets = []int64{4, -1, 1, -1} // counts: 4, 3, 4, 4 (total 15)
|
||||
app.AppendHistogram(ts, h)
|
||||
exp = append(exp, result{t: ts, h: h, fh: h.ToFloat()})
|
||||
hExp := h.Copy()
|
||||
hExp.CounterResetHint = histogram.NotCounterReset
|
||||
exp = append(exp, result{t: ts, h: hExp, fh: hExp.ToFloat()})
|
||||
require.Equal(t, 2, c.NumSamples())
|
||||
|
||||
// Add update with new appender.
|
||||
|
@ -82,7 +84,9 @@ func TestHistogramChunkSameBuckets(t *testing.T) {
|
|||
h.PositiveBuckets = []int64{6, 1, -3, 6} // counts: 6, 7, 4, 10 (total 27)
|
||||
h.NegativeBuckets = []int64{5, 1, -2, 3} // counts: 5, 6, 4, 7 (total 22)
|
||||
app.AppendHistogram(ts, h)
|
||||
exp = append(exp, result{t: ts, h: h, fh: h.ToFloat()})
|
||||
hExp = h.Copy()
|
||||
hExp.CounterResetHint = histogram.NotCounterReset
|
||||
exp = append(exp, result{t: ts, h: hExp, fh: hExp.ToFloat()})
|
||||
require.Equal(t, 3, c.NumSamples())
|
||||
|
||||
// 1. Expand iterator in simple case.
|
||||
|
@ -149,7 +153,7 @@ func TestHistogramChunkSameBuckets(t *testing.T) {
|
|||
require.Equal(t, ValNone, it4.Seek(exp[len(exp)-1].t+1))
|
||||
}
|
||||
|
||||
// Mimics the scenario described for compareSpans().
|
||||
// Mimics the scenario described for expandSpansForward.
|
||||
func TestHistogramChunkBucketChanges(t *testing.T) {
|
||||
c := Chunk(NewHistogramChunk())
|
||||
|
||||
|
@ -220,9 +224,11 @@ func TestHistogramChunkBucketChanges(t *testing.T) {
|
|||
h1.PositiveBuckets = []int64{6, -3, -3, 3, -3, 0, 2, 2, 1, -5, 1}
|
||||
h1.NegativeSpans = h2.NegativeSpans
|
||||
h1.NegativeBuckets = []int64{0, 1}
|
||||
hExp := h2.Copy()
|
||||
hExp.CounterResetHint = histogram.NotCounterReset
|
||||
exp := []result{
|
||||
{t: ts1, h: h1, fh: h1.ToFloat()},
|
||||
{t: ts2, h: h2, fh: h2.ToFloat()},
|
||||
{t: ts2, h: hExp, fh: hExp.ToFloat()},
|
||||
}
|
||||
it := c.Iterator(nil)
|
||||
var act []result
|
||||
|
@ -463,11 +469,12 @@ func TestAtFloatHistogram(t *testing.T) {
|
|||
NegativeBuckets: []float64{1, 2, 1, 2, 2, 2, 2},
|
||||
},
|
||||
{
|
||||
Schema: 0,
|
||||
Count: 36,
|
||||
Sum: 2345.6,
|
||||
ZeroThreshold: 0.001,
|
||||
ZeroCount: 5,
|
||||
CounterResetHint: histogram.NotCounterReset,
|
||||
Schema: 0,
|
||||
Count: 36,
|
||||
Sum: 2345.6,
|
||||
ZeroThreshold: 0.001,
|
||||
ZeroCount: 5,
|
||||
PositiveSpans: []histogram.Span{
|
||||
{Offset: 0, Length: 4},
|
||||
{Offset: 0, Length: 0},
|
||||
|
@ -482,11 +489,12 @@ func TestAtFloatHistogram(t *testing.T) {
|
|||
NegativeBuckets: []float64{1, 4, 2, 7, 5, 5, 2},
|
||||
},
|
||||
{
|
||||
Schema: 0,
|
||||
Count: 36,
|
||||
Sum: 1111.1,
|
||||
ZeroThreshold: 0.001,
|
||||
ZeroCount: 5,
|
||||
CounterResetHint: histogram.NotCounterReset,
|
||||
Schema: 0,
|
||||
Count: 36,
|
||||
Sum: 1111.1,
|
||||
ZeroThreshold: 0.001,
|
||||
ZeroCount: 5,
|
||||
PositiveSpans: []histogram.Span{
|
||||
{Offset: 0, Length: 4},
|
||||
{Offset: 0, Length: 0},
|
||||
|
|
|
@ -1308,17 +1308,31 @@ func TestHeadCompactionWithHistograms(t *testing.T) {
|
|||
|
||||
minute := func(m int) int64 { return int64(m) * time.Minute.Milliseconds() }
|
||||
ctx := context.Background()
|
||||
appendHistogram := func(lbls labels.Labels, from, to int, h *histogram.Histogram, exp *[]tsdbutil.Sample) {
|
||||
appendHistogram := func(
|
||||
lbls labels.Labels, from, to int, h *histogram.Histogram, exp *[]tsdbutil.Sample,
|
||||
) {
|
||||
t.Helper()
|
||||
app := head.Appender(ctx)
|
||||
for tsMinute := from; tsMinute <= to; tsMinute++ {
|
||||
var err error
|
||||
if floatTest {
|
||||
_, err = app.AppendHistogram(0, lbls, minute(tsMinute), nil, h.ToFloat())
|
||||
*exp = append(*exp, sample{t: minute(tsMinute), fh: h.ToFloat()})
|
||||
efh := h.ToFloat()
|
||||
if tsMinute == from {
|
||||
efh.CounterResetHint = histogram.UnknownCounterReset
|
||||
} else {
|
||||
efh.CounterResetHint = histogram.NotCounterReset
|
||||
}
|
||||
*exp = append(*exp, sample{t: minute(tsMinute), fh: efh})
|
||||
} else {
|
||||
_, err = app.AppendHistogram(0, lbls, minute(tsMinute), h, nil)
|
||||
*exp = append(*exp, sample{t: minute(tsMinute), h: h.Copy()})
|
||||
eh := h.Copy()
|
||||
if tsMinute == from {
|
||||
eh.CounterResetHint = histogram.UnknownCounterReset
|
||||
} else {
|
||||
eh.CounterResetHint = histogram.NotCounterReset
|
||||
}
|
||||
*exp = append(*exp, sample{t: minute(tsMinute), h: eh})
|
||||
}
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
|
118
tsdb/db_test.go
|
@ -5836,16 +5836,23 @@ func testHistogramAppendAndQueryHelper(t *testing.T, floatHistogram bool) {
|
|||
})
|
||||
|
||||
ctx := context.Background()
|
||||
appendHistogram := func(lbls labels.Labels, tsMinute int, h *histogram.Histogram, exp *[]tsdbutil.Sample) {
|
||||
appendHistogram := func(
|
||||
lbls labels.Labels, tsMinute int, h *histogram.Histogram,
|
||||
exp *[]tsdbutil.Sample, expCRH histogram.CounterResetHint,
|
||||
) {
|
||||
t.Helper()
|
||||
var err error
|
||||
app := db.Appender(ctx)
|
||||
if floatHistogram {
|
||||
_, err = app.AppendHistogram(0, lbls, minute(tsMinute), nil, h.ToFloat())
|
||||
*exp = append(*exp, sample{t: minute(tsMinute), fh: h.ToFloat()})
|
||||
efh := h.ToFloat()
|
||||
efh.CounterResetHint = expCRH
|
||||
*exp = append(*exp, sample{t: minute(tsMinute), fh: efh})
|
||||
} else {
|
||||
_, err = app.AppendHistogram(0, lbls, minute(tsMinute), h.Copy(), nil)
|
||||
*exp = append(*exp, sample{t: minute(tsMinute), h: h.Copy()})
|
||||
eh := h.Copy()
|
||||
eh.CounterResetHint = expCRH
|
||||
*exp = append(*exp, sample{t: minute(tsMinute), h: eh})
|
||||
}
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, app.Commit())
|
||||
|
@ -5897,23 +5904,23 @@ func testHistogramAppendAndQueryHelper(t *testing.T, floatHistogram bool) {
|
|||
t.Run("series with only histograms", func(t *testing.T) {
|
||||
h := baseH.Copy() // This is shared across all sub tests.
|
||||
|
||||
appendHistogram(series1, 100, h, &exp1)
|
||||
appendHistogram(series1, 100, h, &exp1, histogram.UnknownCounterReset)
|
||||
testQuery("foo", "bar1", map[string][]tsdbutil.Sample{series1.String(): exp1})
|
||||
|
||||
h.PositiveBuckets[0]++
|
||||
h.NegativeBuckets[0] += 2
|
||||
h.Count += 10
|
||||
appendHistogram(series1, 101, h, &exp1)
|
||||
appendHistogram(series1, 101, h, &exp1, histogram.NotCounterReset)
|
||||
testQuery("foo", "bar1", map[string][]tsdbutil.Sample{series1.String(): exp1})
|
||||
|
||||
t.Run("changing schema", func(t *testing.T) {
|
||||
h.Schema = 2
|
||||
appendHistogram(series1, 102, h, &exp1)
|
||||
appendHistogram(series1, 102, h, &exp1, histogram.UnknownCounterReset)
|
||||
testQuery("foo", "bar1", map[string][]tsdbutil.Sample{series1.String(): exp1})
|
||||
|
||||
// Schema back to old.
|
||||
h.Schema = 1
|
||||
appendHistogram(series1, 103, h, &exp1)
|
||||
appendHistogram(series1, 103, h, &exp1, histogram.UnknownCounterReset)
|
||||
testQuery("foo", "bar1", map[string][]tsdbutil.Sample{series1.String(): exp1})
|
||||
})
|
||||
|
||||
|
@ -5942,7 +5949,7 @@ func testHistogramAppendAndQueryHelper(t *testing.T, floatHistogram bool) {
|
|||
h.PositiveSpans[1].Length++
|
||||
h.PositiveBuckets = append(h.PositiveBuckets, 1)
|
||||
h.Count += 3
|
||||
appendHistogram(series1, 104, h, &exp1)
|
||||
appendHistogram(series1, 104, h, &exp1, histogram.NotCounterReset)
|
||||
testQuery("foo", "bar1", map[string][]tsdbutil.Sample{series1.String(): exp1})
|
||||
|
||||
// Because of the previous two histograms being on the active chunk,
|
||||
|
@ -5980,14 +5987,14 @@ func testHistogramAppendAndQueryHelper(t *testing.T, floatHistogram bool) {
|
|||
h.Count += 3
|
||||
// {2, 1, -1, 0, 1} -> {2, 1, 0, -1, 0, 1}
|
||||
h.PositiveBuckets = append(h.PositiveBuckets[:2], append([]int64{0}, h.PositiveBuckets[2:]...)...)
|
||||
appendHistogram(series1, 105, h, &exp1)
|
||||
appendHistogram(series1, 105, h, &exp1, histogram.NotCounterReset)
|
||||
testQuery("foo", "bar1", map[string][]tsdbutil.Sample{series1.String(): exp1})
|
||||
|
||||
// We add 4 more histograms to clear out the buffer and see the re-encoded histograms.
|
||||
appendHistogram(series1, 106, h, &exp1)
|
||||
appendHistogram(series1, 107, h, &exp1)
|
||||
appendHistogram(series1, 108, h, &exp1)
|
||||
appendHistogram(series1, 109, h, &exp1)
|
||||
appendHistogram(series1, 106, h, &exp1, histogram.NotCounterReset)
|
||||
appendHistogram(series1, 107, h, &exp1, histogram.NotCounterReset)
|
||||
appendHistogram(series1, 108, h, &exp1, histogram.NotCounterReset)
|
||||
appendHistogram(series1, 109, h, &exp1, histogram.NotCounterReset)
|
||||
|
||||
// Update the expected histograms to reflect the re-encoding.
|
||||
if floatHistogram {
|
||||
|
@ -6020,7 +6027,7 @@ func testHistogramAppendAndQueryHelper(t *testing.T, floatHistogram bool) {
|
|||
t.Run("buckets disappearing", func(t *testing.T) {
|
||||
h.PositiveSpans[1].Length--
|
||||
h.PositiveBuckets = h.PositiveBuckets[:len(h.PositiveBuckets)-1]
|
||||
appendHistogram(series1, 110, h, &exp1)
|
||||
appendHistogram(series1, 110, h, &exp1, histogram.CounterReset)
|
||||
testQuery("foo", "bar1", map[string][]tsdbutil.Sample{series1.String(): exp1})
|
||||
})
|
||||
})
|
||||
|
@ -6032,9 +6039,9 @@ func testHistogramAppendAndQueryHelper(t *testing.T, floatHistogram bool) {
|
|||
testQuery("foo", "bar2", map[string][]tsdbutil.Sample{series2.String(): exp2})
|
||||
|
||||
h := baseH.Copy()
|
||||
appendHistogram(series2, 103, h, &exp2)
|
||||
appendHistogram(series2, 104, h, &exp2)
|
||||
appendHistogram(series2, 105, h, &exp2)
|
||||
appendHistogram(series2, 103, h, &exp2, histogram.UnknownCounterReset)
|
||||
appendHistogram(series2, 104, h, &exp2, histogram.NotCounterReset)
|
||||
appendHistogram(series2, 105, h, &exp2, histogram.NotCounterReset)
|
||||
testQuery("foo", "bar2", map[string][]tsdbutil.Sample{series2.String(): exp2})
|
||||
|
||||
// Switching between float and histograms again.
|
||||
|
@ -6042,16 +6049,16 @@ func testHistogramAppendAndQueryHelper(t *testing.T, floatHistogram bool) {
|
|||
appendFloat(series2, 107, 107, &exp2)
|
||||
testQuery("foo", "bar2", map[string][]tsdbutil.Sample{series2.String(): exp2})
|
||||
|
||||
appendHistogram(series2, 108, h, &exp2)
|
||||
appendHistogram(series2, 109, h, &exp2)
|
||||
appendHistogram(series2, 108, h, &exp2, histogram.UnknownCounterReset)
|
||||
appendHistogram(series2, 109, h, &exp2, histogram.NotCounterReset)
|
||||
testQuery("foo", "bar2", map[string][]tsdbutil.Sample{series2.String(): exp2})
|
||||
})
|
||||
|
||||
t.Run("series starting with histogram and then getting float", func(t *testing.T) {
|
||||
h := baseH.Copy()
|
||||
appendHistogram(series3, 101, h, &exp3)
|
||||
appendHistogram(series3, 102, h, &exp3)
|
||||
appendHistogram(series3, 103, h, &exp3)
|
||||
appendHistogram(series3, 101, h, &exp3, histogram.UnknownCounterReset)
|
||||
appendHistogram(series3, 102, h, &exp3, histogram.NotCounterReset)
|
||||
appendHistogram(series3, 103, h, &exp3, histogram.NotCounterReset)
|
||||
testQuery("foo", "bar3", map[string][]tsdbutil.Sample{series3.String(): exp3})
|
||||
|
||||
appendFloat(series3, 104, 100, &exp3)
|
||||
|
@ -6060,8 +6067,8 @@ func testHistogramAppendAndQueryHelper(t *testing.T, floatHistogram bool) {
|
|||
testQuery("foo", "bar3", map[string][]tsdbutil.Sample{series3.String(): exp3})
|
||||
|
||||
// Switching between histogram and float again.
|
||||
appendHistogram(series3, 107, h, &exp3)
|
||||
appendHistogram(series3, 108, h, &exp3)
|
||||
appendHistogram(series3, 107, h, &exp3, histogram.UnknownCounterReset)
|
||||
appendHistogram(series3, 108, h, &exp3, histogram.NotCounterReset)
|
||||
testQuery("foo", "bar3", map[string][]tsdbutil.Sample{series3.String(): exp3})
|
||||
|
||||
appendFloat(series3, 109, 106, &exp3)
|
||||
|
@ -6091,7 +6098,7 @@ func TestQueryHistogramFromBlocksWithCompaction(t *testing.T) {
|
|||
t.Helper()
|
||||
|
||||
opts := DefaultOptions()
|
||||
opts.AllowOverlappingCompaction = true // TODO(jesus.vazquez) This replaced AllowOverlappingBlocks, make sure that works
|
||||
opts.AllowOverlappingCompaction = true // TODO(jesusvazquez): This replaced AllowOverlappingBlocks, make sure that works.
|
||||
db := openTestDB(t, opts, nil)
|
||||
t.Cleanup(func() {
|
||||
require.NoError(t, db.Close())
|
||||
|
@ -6137,7 +6144,7 @@ func TestQueryHistogramFromBlocksWithCompaction(t *testing.T) {
|
|||
q, err := db.Querier(ctx, math.MinInt64, math.MaxInt64)
|
||||
require.NoError(t, err)
|
||||
res := query(t, q, labels.MustNewMatcher(labels.MatchRegexp, "__name__", ".*"))
|
||||
require.Equal(t, exp, res)
|
||||
compareSeries(t, exp, res)
|
||||
|
||||
// Compact all the blocks together and query again.
|
||||
blocks := db.Blocks()
|
||||
|
@ -6154,10 +6161,23 @@ func TestQueryHistogramFromBlocksWithCompaction(t *testing.T) {
|
|||
q, err = db.Querier(ctx, math.MinInt64, math.MaxInt64)
|
||||
require.NoError(t, err)
|
||||
res = query(t, q, labels.MustNewMatcher(labels.MatchRegexp, "__name__", ".*"))
|
||||
require.Equal(t, exp, res)
|
||||
|
||||
// After compaction, we do not require "unknown" counter resets
|
||||
// due to origin from different overlapping chunks anymore.
|
||||
for _, ss := range exp {
|
||||
for i, s := range ss[1:] {
|
||||
if s.H() != nil && ss[i].H() != nil && s.H().CounterResetHint == histogram.UnknownCounterReset {
|
||||
s.H().CounterResetHint = histogram.NotCounterReset
|
||||
}
|
||||
if s.FH() != nil && ss[i].FH() != nil && s.FH().CounterResetHint == histogram.UnknownCounterReset {
|
||||
s.FH().CounterResetHint = histogram.NotCounterReset
|
||||
}
|
||||
}
|
||||
}
|
||||
compareSeries(t, exp, res)
|
||||
}
|
||||
|
||||
for _, floatHistogram := range []bool{true} {
|
||||
for _, floatHistogram := range []bool{false, true} {
|
||||
t.Run(fmt.Sprintf("floatHistogram=%t", floatHistogram), func(t *testing.T) {
|
||||
t.Run("serial blocks with only histograms", func(t *testing.T) {
|
||||
testBlockQuerying(t,
|
||||
|
@ -6272,3 +6292,45 @@ func TestNativeHistogramFlag(t *testing.T) {
|
|||
l.String(): {sample{t: 200, h: h}, sample{t: 205, fh: h.ToFloat()}},
|
||||
}, act)
|
||||
}
|
||||
|
||||
// compareSeries essentially replaces `require.Equal(t, expected, actual)` in
|
||||
// situations where the actual series might contain more counter reset hints
|
||||
// "unknown" than the expected series. This can easily happen for long series
|
||||
// that trigger new chunks. This function therefore tolerates counter reset
|
||||
// hints "CounterReset" and "NotCounterReset" in an expected series where the
|
||||
// actual series contains a counter reset hint "UnknownCounterReset".
|
||||
// "GaugeType" hints are still strictly checked, and any "UnknownCounterReset"
|
||||
// in an expected series has to be matched precisely by the actual series.
|
||||
func compareSeries(t require.TestingT, expected, actual map[string][]tsdbutil.Sample) {
|
||||
if len(expected) != len(actual) {
|
||||
// The reason for the difference is not the counter reset hints
|
||||
// (alone), so let's use the pretty diffing by the require
|
||||
// package.
|
||||
require.Equal(t, expected, actual, "number of series differs")
|
||||
}
|
||||
for key, eSamples := range expected {
|
||||
aSamples, ok := actual[key]
|
||||
if !ok {
|
||||
require.Equal(t, expected, actual, "expected series %q not found", key)
|
||||
}
|
||||
if len(eSamples) != len(aSamples) {
|
||||
require.Equal(t, eSamples, aSamples, "number of samples for series %q differs", key)
|
||||
}
|
||||
for i, eS := range eSamples {
|
||||
aS := aSamples[i]
|
||||
aH, eH := aS.H(), eS.H()
|
||||
aFH, eFH := aS.FH(), eS.FH()
|
||||
switch {
|
||||
case aH != nil && eH != nil && aH.CounterResetHint == histogram.UnknownCounterReset && eH.CounterResetHint != histogram.GaugeType:
|
||||
eH = eH.Copy()
|
||||
eH.CounterResetHint = histogram.UnknownCounterReset
|
||||
eS = sample{t: eS.T(), h: eH}
|
||||
case aFH != nil && eFH != nil && aFH.CounterResetHint == histogram.UnknownCounterReset && eFH.CounterResetHint != histogram.GaugeType:
|
||||
eFH = eFH.Copy()
|
||||
eFH.CounterResetHint = histogram.UnknownCounterReset
|
||||
eS = sample{t: eS.T(), fh: eFH}
|
||||
}
|
||||
require.Equal(t, eS, aS, "sample %d in series %q differs", i, key)
|
||||
}
|
||||
}
|
||||
}
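
A sketch of the relaxation in practice, as a fragment that would live in the same test package (t is the usual *testing.T, the values are made up): the expected sample carries NotCounterReset, the sample read back carries UnknownCounterReset, and compareSeries still accepts the pair.

eh := tsdbutil.GenerateTestHistogram(1)
eh.CounterResetHint = histogram.NotCounterReset
ah := tsdbutil.GenerateTestHistogram(1)
ah.CounterResetHint = histogram.UnknownCounterReset
compareSeries(t,
	map[string][]tsdbutil.Sample{"series": {sample{t: 10, h: eh}}},
	map[string][]tsdbutil.Sample{"series": {sample{t: 10, h: ah}}},
)

Reversing the hints would fail, because an expected UnknownCounterReset has to be matched exactly by the actual series.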
|
||||
|
|
101
tsdb/head.go
|
@ -17,7 +17,6 @@ import (
|
|||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"math/rand"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
"time"
|
||||
|
@ -2037,103 +2036,3 @@ func (h *Head) updateWALReplayStatusRead(current int) {
|
|||
|
||||
h.stats.WALReplayStatus.Current = current
|
||||
}
|
||||
|
||||
func GenerateTestHistograms(n int) (r []*histogram.Histogram) {
|
||||
for i := 0; i < n; i++ {
|
||||
r = append(r, &histogram.Histogram{
|
||||
Count: 10 + uint64(i*8),
|
||||
ZeroCount: 2 + uint64(i),
|
||||
ZeroThreshold: 0.001,
|
||||
Sum: 18.4 * float64(i+1),
|
||||
Schema: 1,
|
||||
PositiveSpans: []histogram.Span{
|
||||
{Offset: 0, Length: 2},
|
||||
{Offset: 1, Length: 2},
|
||||
},
|
||||
PositiveBuckets: []int64{int64(i + 1), 1, -1, 0},
|
||||
NegativeSpans: []histogram.Span{
|
||||
{Offset: 0, Length: 2},
|
||||
{Offset: 1, Length: 2},
|
||||
},
|
||||
NegativeBuckets: []int64{int64(i + 1), 1, -1, 0},
|
||||
})
|
||||
}
|
||||
|
||||
return r
|
||||
}
|
||||
|
||||
func GenerateTestGaugeHistograms(n int) (r []*histogram.Histogram) {
|
||||
for x := 0; x < n; x++ {
|
||||
i := rand.Intn(n)
|
||||
r = append(r, &histogram.Histogram{
|
||||
CounterResetHint: histogram.GaugeType,
|
||||
Count: 10 + uint64(i*8),
|
||||
ZeroCount: 2 + uint64(i),
|
||||
ZeroThreshold: 0.001,
|
||||
Sum: 18.4 * float64(i+1),
|
||||
Schema: 1,
|
||||
PositiveSpans: []histogram.Span{
|
||||
{Offset: 0, Length: 2},
|
||||
{Offset: 1, Length: 2},
|
||||
},
|
||||
PositiveBuckets: []int64{int64(i + 1), 1, -1, 0},
|
||||
NegativeSpans: []histogram.Span{
|
||||
{Offset: 0, Length: 2},
|
||||
{Offset: 1, Length: 2},
|
||||
},
|
||||
NegativeBuckets: []int64{int64(i + 1), 1, -1, 0},
|
||||
})
|
||||
}
|
||||
|
||||
return r
|
||||
}
|
||||
|
||||
func GenerateTestFloatHistograms(n int) (r []*histogram.FloatHistogram) {
|
||||
for i := 0; i < n; i++ {
|
||||
r = append(r, &histogram.FloatHistogram{
|
||||
Count: 10 + float64(i*8),
|
||||
ZeroCount: 2 + float64(i),
|
||||
ZeroThreshold: 0.001,
|
||||
Sum: 18.4 * float64(i+1),
|
||||
Schema: 1,
|
||||
PositiveSpans: []histogram.Span{
|
||||
{Offset: 0, Length: 2},
|
||||
{Offset: 1, Length: 2},
|
||||
},
|
||||
PositiveBuckets: []float64{float64(i + 1), float64(i + 2), float64(i + 1), float64(i + 1)},
|
||||
NegativeSpans: []histogram.Span{
|
||||
{Offset: 0, Length: 2},
|
||||
{Offset: 1, Length: 2},
|
||||
},
|
||||
NegativeBuckets: []float64{float64(i + 1), float64(i + 2), float64(i + 1), float64(i + 1)},
|
||||
})
|
||||
}
|
||||
|
||||
return r
|
||||
}
|
||||
|
||||
func GenerateTestGaugeFloatHistograms(n int) (r []*histogram.FloatHistogram) {
|
||||
for x := 0; x < n; x++ {
|
||||
i := rand.Intn(n)
|
||||
r = append(r, &histogram.FloatHistogram{
|
||||
CounterResetHint: histogram.GaugeType,
|
||||
Count: 10 + float64(i*8),
|
||||
ZeroCount: 2 + float64(i),
|
||||
ZeroThreshold: 0.001,
|
||||
Sum: 18.4 * float64(i+1),
|
||||
Schema: 1,
|
||||
PositiveSpans: []histogram.Span{
|
||||
{Offset: 0, Length: 2},
|
||||
{Offset: 1, Length: 2},
|
||||
},
|
||||
PositiveBuckets: []float64{float64(i + 1), float64(i + 2), float64(i + 1), float64(i + 1)},
|
||||
NegativeSpans: []histogram.Span{
|
||||
{Offset: 0, Length: 2},
|
||||
{Offset: 1, Length: 2},
|
||||
},
|
||||
NegativeBuckets: []float64{float64(i + 1), float64(i + 2), float64(i + 1), float64(i + 1)},
|
||||
})
|
||||
}
|
||||
|
||||
return r
|
||||
}
|
||||
|
|
|
@ -1141,7 +1141,6 @@ func (s *memSeries) append(t int64, v float64, appendID uint64, chunkDiskMapper
|
|||
|
||||
// appendHistogram adds the histogram.
|
||||
// It is unsafe to call this concurrently with s.iterator(...) without holding the series lock.
|
||||
// TODO(codesome): Support gauge histograms here.
|
||||
func (s *memSeries) appendHistogram(t int64, h *histogram.Histogram, appendID uint64, chunkDiskMapper *chunks.ChunkDiskMapper, chunkRange int64) (sampleInOrder, chunkCreated bool) {
|
||||
// Head controls the execution of recoding, so that we own the proper
|
||||
// chunk reference afterwards. We check for Appendable from appender before
|
||||
|
@ -1150,44 +1149,54 @@ func (s *memSeries) appendHistogram(t int64, h *histogram.Histogram, appendID ui
|
|||
// meta properly.
|
||||
app, _ := s.app.(*chunkenc.HistogramAppender)
|
||||
var (
|
||||
positiveInterjections, negativeInterjections []chunkenc.Interjection
|
||||
pBackwardInter, nBackwardInter []chunkenc.Interjection
|
||||
pMergedSpans, nMergedSpans []histogram.Span
|
||||
okToAppend, counterReset bool
|
||||
pForwardInserts, nForwardInserts []chunkenc.Insert
|
||||
pBackwardInserts, nBackwardInserts []chunkenc.Insert
|
||||
pMergedSpans, nMergedSpans []histogram.Span
|
||||
okToAppend, counterReset, gauge bool
|
||||
)
|
||||
c, sampleInOrder, chunkCreated := s.appendPreprocessor(t, chunkenc.EncHistogram, chunkDiskMapper, chunkRange)
|
||||
if !sampleInOrder {
|
||||
return sampleInOrder, chunkCreated
|
||||
}
|
||||
gauge := h.CounterResetHint == histogram.GaugeType
|
||||
if app != nil {
|
||||
if gauge {
|
||||
positiveInterjections, negativeInterjections, pBackwardInter, nBackwardInter, pMergedSpans, nMergedSpans, okToAppend = app.AppendableGauge(h)
|
||||
} else {
|
||||
positiveInterjections, negativeInterjections, okToAppend, counterReset = app.Appendable(h)
|
||||
switch h.CounterResetHint {
|
||||
case histogram.GaugeType:
|
||||
gauge = true
|
||||
if app != nil {
|
||||
pForwardInserts, nForwardInserts,
|
||||
pBackwardInserts, nBackwardInserts,
|
||||
pMergedSpans, nMergedSpans,
|
||||
okToAppend = app.AppendableGauge(h)
|
||||
}
|
||||
case histogram.CounterReset:
|
||||
// The caller tells us this is a counter reset, even if it
|
||||
// doesn't look like one.
|
||||
counterReset = true
|
||||
default:
|
||||
if app != nil {
|
||||
pForwardInserts, nForwardInserts, okToAppend, counterReset = app.Appendable(h)
|
||||
}
|
||||
}
|
||||
|
||||
if !chunkCreated {
|
||||
if len(pBackwardInter)+len(nBackwardInter) > 0 {
|
||||
if len(pBackwardInserts)+len(nBackwardInserts) > 0 {
|
||||
h.PositiveSpans = pMergedSpans
|
||||
h.NegativeSpans = nMergedSpans
|
||||
app.RecodeHistogram(h, pBackwardInter, nBackwardInter)
|
||||
app.RecodeHistogram(h, pBackwardInserts, nBackwardInserts)
|
||||
}
|
||||
// We have 3 cases here
|
||||
// - !okToAppend -> We need to cut a new chunk.
|
||||
// - okToAppend but we have interjections → Existing chunk needs
|
||||
// - okToAppend but we have inserts → Existing chunk needs
|
||||
// recoding before we can append our histogram.
|
||||
// - okToAppend and no interjections → Chunk is ready to support our histogram.
|
||||
// - okToAppend and no inserts → Chunk is ready to support our histogram.
|
||||
if !okToAppend || counterReset {
|
||||
c = s.cutNewHeadChunk(t, chunkenc.EncHistogram, chunkDiskMapper, chunkRange)
|
||||
chunkCreated = true
|
||||
} else if len(positiveInterjections) > 0 || len(negativeInterjections) > 0 {
|
||||
} else if len(pForwardInserts) > 0 || len(nForwardInserts) > 0 {
|
||||
// New buckets have appeared. We need to recode all
|
||||
// prior histogram samples within the chunk before we
|
||||
// can process this one.
|
||||
chunk, app := app.Recode(
|
||||
positiveInterjections, negativeInterjections,
|
||||
pForwardInserts, nForwardInserts,
|
||||
h.PositiveSpans, h.NegativeSpans,
|
||||
)
|
||||
c.chunk = chunk
|
||||
|
@ -1233,45 +1242,54 @@ func (s *memSeries) appendFloatHistogram(t int64, fh *histogram.FloatHistogram,
|
|||
// meta properly.
|
||||
app, _ := s.app.(*chunkenc.FloatHistogramAppender)
|
||||
var (
|
||||
positiveInterjections, negativeInterjections []chunkenc.Interjection
|
||||
pBackwardInter, nBackwardInter []chunkenc.Interjection
|
||||
pMergedSpans, nMergedSpans []histogram.Span
|
||||
okToAppend, counterReset bool
|
||||
pForwardInserts, nForwardInserts []chunkenc.Insert
|
||||
pBackwardInserts, nBackwardInserts []chunkenc.Insert
|
||||
pMergedSpans, nMergedSpans []histogram.Span
|
||||
okToAppend, counterReset, gauge bool
|
||||
)
|
||||
c, sampleInOrder, chunkCreated := s.appendPreprocessor(t, chunkenc.EncFloatHistogram, chunkDiskMapper, chunkRange)
|
||||
if !sampleInOrder {
|
||||
return sampleInOrder, chunkCreated
|
||||
}
|
||||
gauge := fh.CounterResetHint == histogram.GaugeType
|
||||
if app != nil {
|
||||
if gauge {
|
||||
positiveInterjections, negativeInterjections, pBackwardInter, nBackwardInter,
|
||||
pMergedSpans, nMergedSpans, okToAppend = app.AppendableGauge(fh)
|
||||
} else {
|
||||
positiveInterjections, negativeInterjections, okToAppend, counterReset = app.Appendable(fh)
|
||||
switch fh.CounterResetHint {
|
||||
case histogram.GaugeType:
|
||||
gauge = true
|
||||
if app != nil {
|
||||
pForwardInserts, nForwardInserts,
|
||||
pBackwardInserts, nBackwardInserts,
|
||||
pMergedSpans, nMergedSpans,
|
||||
okToAppend = app.AppendableGauge(fh)
|
||||
}
|
||||
case histogram.CounterReset:
|
||||
// The caller tells us this is a counter reset, even if it
|
||||
// doesn't look like one.
|
||||
counterReset = true
|
||||
default:
|
||||
if app != nil {
|
||||
pForwardInserts, nForwardInserts, okToAppend, counterReset = app.Appendable(fh)
|
||||
}
|
||||
}
|
||||
|
||||
if !chunkCreated {
|
||||
if len(pBackwardInter)+len(nBackwardInter) > 0 {
|
||||
if len(pBackwardInserts)+len(nBackwardInserts) > 0 {
|
||||
fh.PositiveSpans = pMergedSpans
|
||||
fh.NegativeSpans = nMergedSpans
|
||||
app.RecodeHistogramm(fh, pBackwardInter, nBackwardInter)
|
||||
app.RecodeHistogramm(fh, pBackwardInserts, nBackwardInserts)
|
||||
}
|
||||
// We have 3 cases here
|
||||
// - !okToAppend -> We need to cut a new chunk.
|
||||
// - okToAppend but we have interjections → Existing chunk needs
|
||||
// - okToAppend but we have inserts → Existing chunk needs
|
||||
// recoding before we can append our histogram.
|
||||
// - okToAppend and no interjections → Chunk is ready to support our histogram.
|
||||
// - okToAppend and no inserts → Chunk is ready to support our histogram.
|
||||
if !okToAppend || counterReset {
|
||||
c = s.cutNewHeadChunk(t, chunkenc.EncFloatHistogram, chunkDiskMapper, chunkRange)
|
||||
chunkCreated = true
|
||||
} else if len(positiveInterjections) > 0 || len(negativeInterjections) > 0 {
|
||||
} else if len(pForwardInserts) > 0 || len(nForwardInserts) > 0 {
|
||||
// New buckets have appeared. We need to recode all
|
||||
// prior histogram samples within the chunk before we
|
||||
// can process this one.
|
||||
chunk, app := app.Recode(
|
||||
positiveInterjections, negativeInterjections,
|
||||
pForwardInserts, nForwardInserts,
|
||||
fh.PositiveSpans, fh.NegativeSpans,
|
||||
)
|
||||
c.chunk = chunk
|
||||
|
|
|
@ -197,7 +197,6 @@ func BenchmarkLoadWAL(b *testing.B) {
|
|||
continue
|
||||
}
|
||||
lastExemplarsPerSeries = exemplarsPerSeries
|
||||
// fmt.Println("exemplars per series: ", exemplarsPerSeries)
|
||||
b.Run(fmt.Sprintf("batches=%d,seriesPerBatch=%d,samplesPerSeries=%d,exemplarsPerSeries=%d,mmappedChunkT=%d", c.batches, c.seriesPerBatch, c.samplesPerSeries, exemplarsPerSeries, c.mmappedChunkT),
|
||||
func(b *testing.B) {
|
||||
dir := b.TempDir()
|
||||
|
@ -1344,7 +1343,7 @@ func TestMemSeries_appendHistogram(t *testing.T) {
|
|||
|
||||
s := newMemSeries(labels.Labels{}, 1, defaultIsolationDisabled)
|
||||
|
||||
histograms := GenerateTestHistograms(4)
|
||||
histograms := tsdbutil.GenerateTestHistograms(4)
|
||||
histogramWithOneMoreBucket := histograms[3].Copy()
|
||||
histogramWithOneMoreBucket.Count++
|
||||
histogramWithOneMoreBucket.Sum += 1.23
|
||||
|
@ -2834,17 +2833,13 @@ func TestAppendHistogram(t *testing.T) {
|
|||
ingestTs := int64(0)
|
||||
app := head.Appender(context.Background())
|
||||
|
||||
type timedHistogram struct {
|
||||
t int64
|
||||
h *histogram.Histogram
|
||||
}
|
||||
expHistograms := make([]timedHistogram, 0, 2*numHistograms)
|
||||
expHistograms := make([]tsdbutil.Sample, 0, 2*numHistograms)
|
||||
|
||||
// Counter integer histograms.
|
||||
for _, h := range GenerateTestHistograms(numHistograms) {
|
||||
for _, h := range tsdbutil.GenerateTestHistograms(numHistograms) {
|
||||
_, err := app.AppendHistogram(0, l, ingestTs, h, nil)
|
||||
require.NoError(t, err)
|
||||
expHistograms = append(expHistograms, timedHistogram{ingestTs, h})
|
||||
expHistograms = append(expHistograms, sample{t: ingestTs, h: h})
|
||||
ingestTs++
|
||||
if ingestTs%50 == 0 {
|
||||
require.NoError(t, app.Commit())
|
||||
|
@ -2853,10 +2848,10 @@ func TestAppendHistogram(t *testing.T) {
|
|||
}
|
||||
|
||||
// Gauge integer histograms.
|
||||
for _, h := range GenerateTestGaugeHistograms(numHistograms) {
|
||||
for _, h := range tsdbutil.GenerateTestGaugeHistograms(numHistograms) {
|
||||
_, err := app.AppendHistogram(0, l, ingestTs, h, nil)
|
||||
require.NoError(t, err)
|
||||
expHistograms = append(expHistograms, timedHistogram{ingestTs, h})
|
||||
expHistograms = append(expHistograms, sample{t: ingestTs, h: h})
|
||||
ingestTs++
|
||||
if ingestTs%50 == 0 {
|
||||
require.NoError(t, app.Commit())
|
||||
|
@ -2864,17 +2859,13 @@ func TestAppendHistogram(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
type timedFloatHistogram struct {
|
||||
t int64
|
||||
h *histogram.FloatHistogram
|
||||
}
|
||||
expFloatHistograms := make([]timedFloatHistogram, 0, 2*numHistograms)
|
||||
expFloatHistograms := make([]tsdbutil.Sample, 0, 2*numHistograms)
|
||||
|
||||
// Counter float histograms.
|
||||
for _, fh := range GenerateTestFloatHistograms(numHistograms) {
|
||||
for _, fh := range tsdbutil.GenerateTestFloatHistograms(numHistograms) {
|
||||
_, err := app.AppendHistogram(0, l, ingestTs, nil, fh)
|
||||
require.NoError(t, err)
|
||||
expFloatHistograms = append(expFloatHistograms, timedFloatHistogram{ingestTs, fh})
|
||||
expFloatHistograms = append(expFloatHistograms, sample{t: ingestTs, fh: fh})
|
||||
ingestTs++
|
||||
if ingestTs%50 == 0 {
|
||||
require.NoError(t, app.Commit())
|
||||
|
@ -2883,10 +2874,10 @@ func TestAppendHistogram(t *testing.T) {
|
|||
}
|
||||
|
||||
// Gauge float histograms.
|
||||
for _, fh := range GenerateTestGaugeFloatHistograms(numHistograms) {
|
||||
for _, fh := range tsdbutil.GenerateTestGaugeFloatHistograms(numHistograms) {
|
||||
_, err := app.AppendHistogram(0, l, ingestTs, nil, fh)
|
||||
require.NoError(t, err)
|
||||
expFloatHistograms = append(expFloatHistograms, timedFloatHistogram{ingestTs, fh})
|
||||
expFloatHistograms = append(expFloatHistograms, sample{t: ingestTs, fh: fh})
|
||||
ingestTs++
|
||||
if ingestTs%50 == 0 {
|
||||
require.NoError(t, app.Commit())
|
||||
|
@ -2909,20 +2900,28 @@ func TestAppendHistogram(t *testing.T) {
|
|||
require.False(t, ss.Next())
|
||||
|
||||
it := s.Iterator(nil)
|
||||
actHistograms := make([]timedHistogram, 0, len(expHistograms))
|
||||
actFloatHistograms := make([]timedFloatHistogram, 0, len(expFloatHistograms))
|
||||
actHistograms := make([]tsdbutil.Sample, 0, len(expHistograms))
|
||||
actFloatHistograms := make([]tsdbutil.Sample, 0, len(expFloatHistograms))
|
||||
for typ := it.Next(); typ != chunkenc.ValNone; typ = it.Next() {
|
||||
if typ == chunkenc.ValHistogram {
|
||||
ts, h := it.AtHistogram()
|
||||
actHistograms = append(actHistograms, timedHistogram{ts, h})
|
||||
actHistograms = append(actHistograms, sample{t: ts, h: h})
|
||||
} else if typ == chunkenc.ValFloatHistogram {
|
||||
ts, fh := it.AtFloatHistogram()
|
||||
actFloatHistograms = append(actFloatHistograms, timedFloatHistogram{ts, fh})
|
||||
actFloatHistograms = append(actFloatHistograms, sample{t: ts, fh: fh})
|
||||
}
|
||||
}
|
||||
|
||||
require.Equal(t, expHistograms, actHistograms)
|
||||
require.Equal(t, expFloatHistograms, actFloatHistograms)
|
||||
compareSeries(
|
||||
t,
|
||||
map[string][]tsdbutil.Sample{"dummy": expHistograms},
|
||||
map[string][]tsdbutil.Sample{"dummy": actHistograms},
|
||||
)
|
||||
compareSeries(
|
||||
t,
|
||||
map[string][]tsdbutil.Sample{"dummy": expFloatHistograms},
|
||||
map[string][]tsdbutil.Sample{"dummy": actFloatHistograms},
|
||||
)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
@ -2945,9 +2944,9 @@ func TestHistogramInWALAndMmapChunk(t *testing.T) {
|
|||
app = head.Appender(context.Background())
|
||||
var hists []*histogram.Histogram
|
||||
if gauge {
|
||||
hists = GenerateTestGaugeHistograms(numHistograms)
|
||||
hists = tsdbutil.GenerateTestGaugeHistograms(numHistograms)
|
||||
} else {
|
||||
hists = GenerateTestHistograms(numHistograms)
|
||||
hists = tsdbutil.GenerateTestHistograms(numHistograms)
|
||||
}
|
||||
for _, h := range hists {
|
||||
h.Count = h.Count * 2
|
||||
|
@ -2968,9 +2967,9 @@ func TestHistogramInWALAndMmapChunk(t *testing.T) {
|
|||
app = head.Appender(context.Background())
|
||||
var hists []*histogram.FloatHistogram
|
||||
if gauge {
|
||||
hists = GenerateTestGaugeFloatHistograms(numHistograms)
|
||||
hists = tsdbutil.GenerateTestGaugeFloatHistograms(numHistograms)
|
||||
} else {
|
||||
hists = GenerateTestFloatHistograms(numHistograms)
|
||||
hists = tsdbutil.GenerateTestFloatHistograms(numHistograms)
|
||||
}
|
||||
for _, h := range hists {
|
||||
h.Count = h.Count * 2
|
||||
|
@ -3008,9 +3007,9 @@ func TestHistogramInWALAndMmapChunk(t *testing.T) {
|
|||
app = head.Appender(context.Background())
|
||||
var hists []*histogram.Histogram
|
||||
if gauge {
|
||||
hists = GenerateTestGaugeHistograms(100)
|
||||
hists = tsdbutil.GenerateTestGaugeHistograms(100)
|
||||
} else {
|
||||
hists = GenerateTestHistograms(100)
|
||||
hists = tsdbutil.GenerateTestHistograms(100)
|
||||
}
|
||||
for _, h := range hists {
|
||||
ts++
|
||||
|
@ -3019,7 +3018,12 @@ func TestHistogramInWALAndMmapChunk(t *testing.T) {
|
|||
h.NegativeBuckets = h.PositiveBuckets
|
||||
_, err := app.AppendHistogram(0, s2, int64(ts), h, nil)
|
||||
require.NoError(t, err)
|
||||
exp[k2] = append(exp[k2], sample{t: int64(ts), h: h.Copy()})
|
||||
eh := h.Copy()
|
||||
if !gauge && ts > 30 && (ts-10)%20 == 1 {
|
||||
// Need "unknown" hint after float sample.
|
||||
eh.CounterResetHint = histogram.UnknownCounterReset
|
||||
}
|
||||
exp[k2] = append(exp[k2], sample{t: int64(ts), h: eh})
|
||||
if ts%20 == 0 {
|
||||
require.NoError(t, app.Commit())
|
||||
app = head.Appender(context.Background())
|
||||
|
@ -3040,9 +3044,9 @@ func TestHistogramInWALAndMmapChunk(t *testing.T) {
|
|||
app = head.Appender(context.Background())
|
||||
var hists []*histogram.FloatHistogram
|
||||
if gauge {
|
||||
hists = GenerateTestGaugeFloatHistograms(100)
|
||||
hists = tsdbutil.GenerateTestGaugeFloatHistograms(100)
|
||||
} else {
|
||||
hists = GenerateTestFloatHistograms(100)
|
||||
hists = tsdbutil.GenerateTestFloatHistograms(100)
|
||||
}
|
||||
for _, h := range hists {
|
||||
ts++
|
||||
|
@ -3051,7 +3055,12 @@ func TestHistogramInWALAndMmapChunk(t *testing.T) {
|
|||
h.NegativeBuckets = h.PositiveBuckets
|
||||
_, err := app.AppendHistogram(0, s2, int64(ts), nil, h)
|
||||
require.NoError(t, err)
|
||||
exp[k2] = append(exp[k2], sample{t: int64(ts), fh: h.Copy()})
|
||||
eh := h.Copy()
|
||||
if !gauge && ts > 30 && (ts-10)%20 == 1 {
|
||||
// Need "unknown" hint after float sample.
|
||||
eh.CounterResetHint = histogram.UnknownCounterReset
|
||||
}
|
||||
exp[k2] = append(exp[k2], sample{t: int64(ts), fh: eh})
|
||||
if ts%20 == 0 {
|
||||
require.NoError(t, app.Commit())
|
||||
app = head.Appender(context.Background())
|
||||
|
@ -3089,7 +3098,7 @@ func TestHistogramInWALAndMmapChunk(t *testing.T) {
|
|||
q, err := NewBlockQuerier(head, head.MinTime(), head.MaxTime())
|
||||
require.NoError(t, err)
|
||||
act := query(t, q, labels.MustNewMatcher(labels.MatchRegexp, "a", "b.*"))
|
||||
require.Equal(t, exp, act)
|
||||
compareSeries(t, exp, act)
|
||||
}
|
||||
testQuery()
|
||||
|
||||
|
@ -3413,14 +3422,14 @@ func TestHistogramMetrics(t *testing.T) {
|
|||
for x := 0; x < 5; x++ {
|
||||
expHSeries++
|
||||
l := labels.FromStrings("a", fmt.Sprintf("b%d", x))
|
||||
for i, h := range GenerateTestHistograms(numHistograms) {
|
||||
for i, h := range tsdbutil.GenerateTestHistograms(numHistograms) {
|
||||
app := head.Appender(context.Background())
|
||||
_, err := app.AppendHistogram(0, l, int64(i), h, nil)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, app.Commit())
|
||||
expHSamples++
|
||||
}
|
||||
for i, fh := range GenerateTestFloatHistograms(numHistograms) {
|
||||
for i, fh := range tsdbutil.GenerateTestFloatHistograms(numHistograms) {
|
||||
app := head.Appender(context.Background())
|
||||
_, err := app.AppendHistogram(0, l, int64(numHistograms+i), nil, fh)
|
||||
require.NoError(t, err)
|
||||
|
@ -3506,6 +3515,11 @@ func testHistogramStaleSampleHelper(t *testing.T, floatHistogram bool) {
|
|||
ah.fh.Sum = 0
|
||||
eh.fh = eh.fh.Copy()
|
||||
eh.fh.Sum = 0
|
||||
} else if i > 0 {
|
||||
prev := expHistograms[i-1]
|
||||
if prev.fh == nil || value.IsStaleNaN(prev.fh.Sum) {
|
||||
eh.fh.CounterResetHint = histogram.UnknownCounterReset
|
||||
}
|
||||
}
|
||||
require.Equal(t, eh, ah)
|
||||
} else {
|
||||
|
@ -3516,6 +3530,11 @@ func testHistogramStaleSampleHelper(t *testing.T, floatHistogram bool) {
|
|||
ah.h.Sum = 0
|
||||
eh.h = eh.h.Copy()
|
||||
eh.h.Sum = 0
|
||||
} else if i > 0 {
|
||||
prev := expHistograms[i-1]
|
||||
if prev.h == nil || value.IsStaleNaN(prev.h.Sum) {
|
||||
eh.h.CounterResetHint = histogram.UnknownCounterReset
|
||||
}
|
||||
}
|
||||
require.Equal(t, eh, ah)
|
||||
}
|
||||
|
@ -3525,7 +3544,7 @@ func testHistogramStaleSampleHelper(t *testing.T, floatHistogram bool) {
|
|||
|
||||
// Adding stale in the same appender.
|
||||
app := head.Appender(context.Background())
|
||||
for _, h := range GenerateTestHistograms(numHistograms) {
|
||||
for _, h := range tsdbutil.GenerateTestHistograms(numHistograms) {
|
||||
var err error
|
||||
if floatHistogram {
|
||||
_, err = app.AppendHistogram(0, l, 100*int64(len(expHistograms)), nil, h.ToFloat())
|
||||
|
@ -3554,7 +3573,7 @@ func testHistogramStaleSampleHelper(t *testing.T, floatHistogram bool) {
|
|||
|
||||
// Adding stale in different appender and continuing series after a stale sample.
|
||||
app = head.Appender(context.Background())
|
||||
for _, h := range GenerateTestHistograms(2 * numHistograms)[numHistograms:] {
|
||||
for _, h := range tsdbutil.GenerateTestHistograms(2 * numHistograms)[numHistograms:] {
|
||||
var err error
|
||||
if floatHistogram {
|
||||
_, err = app.AppendHistogram(0, l, 100*int64(len(expHistograms)), nil, h.ToFloat())
|
||||
|
@ -3633,7 +3652,7 @@ func TestHistogramCounterResetHeader(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
h := GenerateTestHistograms(1)[0]
|
||||
h := tsdbutil.GenerateTestHistograms(1)[0]
|
||||
h.PositiveBuckets = []int64{100, 1, 1, 1}
|
||||
h.NegativeBuckets = []int64{100, 1, 1, 1}
|
||||
h.Count = 1000
|
||||
|
@ -3710,8 +3729,8 @@ func TestAppendingDifferentEncodingToSameSeries(t *testing.T) {
|
|||
})
|
||||
db.DisableCompactions()
|
||||
|
||||
hists := GenerateTestHistograms(10)
|
||||
floatHists := GenerateTestFloatHistograms(10)
|
||||
hists := tsdbutil.GenerateTestHistograms(10)
|
||||
floatHists := tsdbutil.GenerateTestFloatHistograms(10)
|
||||
lbls := labels.FromStrings("a", "b")
|
||||
|
||||
var expResult []tsdbutil.Sample
|
||||
|
@ -3730,8 +3749,10 @@ func TestAppendingDifferentEncodingToSameSeries(t *testing.T) {
|
|||
// If this is empty, samples above will be taken instead of this.
|
||||
addToExp []tsdbutil.Sample
|
||||
}{
|
||||
// Histograms that end up in the expected samples are copied here so that we
|
||||
// can independently set the CounterResetHint later.
|
||||
{
|
||||
samples: []tsdbutil.Sample{sample{t: 100, h: hists[1]}},
|
||||
samples: []tsdbutil.Sample{sample{t: 100, h: hists[0].Copy()}},
|
||||
expChunks: 1,
|
||||
},
|
||||
{
|
||||
|
@ -3739,23 +3760,23 @@ func TestAppendingDifferentEncodingToSameSeries(t *testing.T) {
|
|||
expChunks: 2,
|
||||
},
|
||||
{
|
||||
samples: []tsdbutil.Sample{sample{t: 210, fh: floatHists[1]}},
|
||||
samples: []tsdbutil.Sample{sample{t: 210, fh: floatHists[0].Copy()}},
|
||||
expChunks: 3,
|
||||
},
|
||||
{
|
||||
samples: []tsdbutil.Sample{sample{t: 220, h: hists[1]}},
|
||||
samples: []tsdbutil.Sample{sample{t: 220, h: hists[1].Copy()}},
|
||||
expChunks: 4,
|
||||
},
|
||||
{
|
||||
samples: []tsdbutil.Sample{sample{t: 230, fh: floatHists[3]}},
|
||||
samples: []tsdbutil.Sample{sample{t: 230, fh: floatHists[3].Copy()}},
|
||||
expChunks: 5,
|
||||
},
|
||||
{
|
||||
samples: []tsdbutil.Sample{sample{t: 100, h: hists[2]}},
|
||||
samples: []tsdbutil.Sample{sample{t: 100, h: hists[2].Copy()}},
|
||||
err: storage.ErrOutOfOrderSample,
|
||||
},
|
||||
{
|
||||
samples: []tsdbutil.Sample{sample{t: 300, h: hists[3]}},
|
||||
samples: []tsdbutil.Sample{sample{t: 300, h: hists[3].Copy()}},
|
||||
expChunks: 6,
|
||||
},
|
||||
{
|
||||
|
@ -3763,7 +3784,7 @@ func TestAppendingDifferentEncodingToSameSeries(t *testing.T) {
|
|||
err: storage.ErrOutOfOrderSample,
|
||||
},
|
||||
{
|
||||
samples: []tsdbutil.Sample{sample{t: 100, fh: floatHists[4]}},
|
||||
samples: []tsdbutil.Sample{sample{t: 100, fh: floatHists[4].Copy()}},
|
||||
err: storage.ErrOutOfOrderSample,
|
||||
},
|
||||
{
|
||||
|
@ -3789,7 +3810,7 @@ func TestAppendingDifferentEncodingToSameSeries(t *testing.T) {
|
|||
},
|
||||
addToExp: []tsdbutil.Sample{
|
||||
sample{t: 800, v: 8},
|
||||
sample{t: 900, h: hists[9]},
|
||||
sample{t: 900, h: hists[9].Copy()},
|
||||
},
|
||||
expChunks: 8, // float64 added to old chunk, only 1 new for histograms.
|
||||
},
|
||||
|
@ -3800,7 +3821,7 @@ func TestAppendingDifferentEncodingToSameSeries(t *testing.T) {
|
|||
sample{t: 1100, h: hists[9]},
|
||||
},
|
||||
addToExp: []tsdbutil.Sample{
|
||||
sample{t: 1100, h: hists[9]},
|
||||
sample{t: 1100, h: hists[9].Copy()},
|
||||
},
|
||||
expChunks: 8,
|
||||
},
|
||||
|
@ -3830,6 +3851,14 @@ func TestAppendingDifferentEncodingToSameSeries(t *testing.T) {
|
|||
require.NoError(t, app.Rollback())
|
||||
}
|
||||
}
|
||||
for i, s := range expResult[1:] {
|
||||
switch {
|
||||
case s.H() != nil && expResult[i].H() == nil:
|
||||
s.(sample).h.CounterResetHint = histogram.UnknownCounterReset
|
||||
case s.FH() != nil && expResult[i].FH() == nil:
|
||||
s.(sample).fh.CounterResetHint = histogram.UnknownCounterReset
|
||||
}
|
||||
}
|
||||
|
||||
// Query back and expect same order of samples.
|
||||
q, err := db.Querier(context.Background(), math.MinInt64, math.MaxInt64)
|
||||
|
@ -4329,7 +4358,7 @@ func TestHistogramValidation(t *testing.T) {
|
|||
errMsgFloat string // To be considered for float histogram only if it is non-empty.
|
||||
}{
|
||||
"valid histogram": {
|
||||
h: GenerateTestHistograms(1)[0],
|
||||
h: tsdbutil.GenerateTestHistograms(1)[0],
|
||||
},
|
||||
"rejects histogram who has too few negative buckets": {
|
||||
h: &histogram.Histogram{
|
||||
|
@ -4612,7 +4641,7 @@ func TestGaugeHistogramWALAndChunkHeader(t *testing.T) {
|
|||
require.NoError(t, app.Commit())
|
||||
}
|
||||
|
||||
hists := GenerateTestGaugeHistograms(5)
|
||||
hists := tsdbutil.GenerateTestGaugeHistograms(5)
|
||||
hists[0].CounterResetHint = histogram.UnknownCounterReset
|
||||
appendHistogram(hists[0])
|
||||
appendHistogram(hists[1])
|
||||
|
@ -4687,7 +4716,7 @@ func TestGaugeFloatHistogramWALAndChunkHeader(t *testing.T) {
|
|||
require.NoError(t, app.Commit())
|
||||
}
|
||||
|
||||
hists := GenerateTestGaugeFloatHistograms(5)
|
||||
hists := tsdbutil.GenerateTestGaugeFloatHistograms(5)
|
||||
hists[0].CounterResetHint = histogram.UnknownCounterReset
|
||||
appendHistogram(hists[0])
|
||||
appendHistogram(hists[1])
|
||||
|
|
|
@ -498,6 +498,12 @@ func (h *Head) resetSeriesWithMMappedChunks(mSeries *memSeries, mmc, oooMmc []*m
|
|||
h.metrics.chunksCreated.Add(float64(len(mmc) + len(oooMmc)))
|
||||
h.metrics.chunksRemoved.Add(float64(len(mSeries.mmappedChunks)))
|
||||
h.metrics.chunks.Add(float64(len(mmc) + len(oooMmc) - len(mSeries.mmappedChunks)))
|
||||
|
||||
if mSeries.ooo != nil {
|
||||
h.metrics.chunksRemoved.Add(float64(len(mSeries.ooo.oooMmappedChunks)))
|
||||
h.metrics.chunks.Sub(float64(len(mSeries.ooo.oooMmappedChunks)))
|
||||
}
|
||||
|
||||
mSeries.mmappedChunks = mmc
|
||||
if len(oooMmc) == 0 {
|
||||
mSeries.ooo = nil
|
||||
|
|
|
@ -72,6 +72,8 @@ func ChunkFromSamplesGeneric(s Samples) chunks.Meta {
|
|||
ca.Append(s.Get(i).T(), s.Get(i).V())
|
||||
case chunkenc.ValHistogram:
|
||||
ca.AppendHistogram(s.Get(i).T(), s.Get(i).H())
|
||||
case chunkenc.ValFloatHistogram:
|
||||
ca.AppendFloatHistogram(s.Get(i).T(), s.Get(i).FH())
|
||||
default:
|
||||
panic(fmt.Sprintf("unknown sample type %s", sampleType.String()))
|
||||
}
|
||||
|
@ -128,12 +130,18 @@ func PopulatedChunk(numSamples int, minTime int64) chunks.Meta {
|
|||
|
||||
// GenerateSamples starting at start and counting up numSamples.
|
||||
func GenerateSamples(start, numSamples int) []Sample {
|
||||
samples := make([]Sample, 0, numSamples)
|
||||
for i := start; i < start+numSamples; i++ {
|
||||
samples = append(samples, sample{
|
||||
return generateSamples(start, numSamples, func(i int) Sample {
|
||||
return sample{
|
||||
t: int64(i),
|
||||
v: float64(i),
|
||||
})
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func generateSamples(start, numSamples int, gen func(int) Sample) []Sample {
|
||||
samples := make([]Sample, 0, numSamples)
|
||||
for i := start; i < start+numSamples; i++ {
|
||||
samples = append(samples, gen(i))
|
||||
}
|
||||
return samples
|
||||
}
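
A sketch of what the split enables; the wrapper below is hypothetical and not part of the change, it just shows how other sample shapes can reuse the same loop by supplying a different generator.

// generateSpacedSamples produces float samples spaced one second apart,
// reusing the unexported helper above.
func generateSpacedSamples(start, numSamples int) []Sample {
	return generateSamples(start, numSamples, func(i int) Sample {
		return sample{t: int64(i) * 1000, v: float64(i)}
	})
}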
|
||||
|
|
110
tsdb/tsdbutil/histogram.go
Normal file
|
@@ -0,0 +1,110 @@
+// Copyright 2023 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tsdbutil
+
+import (
+	"math/rand"
+
+	"github.com/prometheus/prometheus/model/histogram"
+)
+
+func GenerateTestHistograms(n int) (r []*histogram.Histogram) {
+	for i := 0; i < n; i++ {
+		h := GenerateTestHistogram(i)
+		if i > 0 {
+			h.CounterResetHint = histogram.NotCounterReset
+		}
+		r = append(r, h)
+	}
+	return r
+}
+
+// GenerateTestHistogram but it is up to the user to set any known counter reset hint.
+func GenerateTestHistogram(i int) *histogram.Histogram {
+	return &histogram.Histogram{
+		Count:         10 + uint64(i*8),
+		ZeroCount:     2 + uint64(i),
+		ZeroThreshold: 0.001,
+		Sum:           18.4 * float64(i+1),
+		Schema:        1,
+		PositiveSpans: []histogram.Span{
+			{Offset: 0, Length: 2},
+			{Offset: 1, Length: 2},
+		},
+		PositiveBuckets: []int64{int64(i + 1), 1, -1, 0},
+		NegativeSpans: []histogram.Span{
+			{Offset: 0, Length: 2},
+			{Offset: 1, Length: 2},
+		},
+		NegativeBuckets: []int64{int64(i + 1), 1, -1, 0},
+	}
+}
+
+func GenerateTestGaugeHistograms(n int) (r []*histogram.Histogram) {
+	for x := 0; x < n; x++ {
+		r = append(r, GenerateTestGaugeHistogram(rand.Intn(n)))
+	}
+	return r
+}
+
+func GenerateTestGaugeHistogram(i int) *histogram.Histogram {
+	h := GenerateTestHistogram(i)
+	h.CounterResetHint = histogram.GaugeType
+	return h
+}
+
+func GenerateTestFloatHistograms(n int) (r []*histogram.FloatHistogram) {
+	for i := 0; i < n; i++ {
+		h := GenerateTestFloatHistogram(i)
+		if i > 0 {
+			h.CounterResetHint = histogram.NotCounterReset
+		}
+		r = append(r, h)
+	}
+	return r
+}
+
+// GenerateTestFloatHistogram but it is up to the user to set any known counter reset hint.
+func GenerateTestFloatHistogram(i int) *histogram.FloatHistogram {
+	return &histogram.FloatHistogram{
+		Count:         10 + float64(i*8),
+		ZeroCount:     2 + float64(i),
+		ZeroThreshold: 0.001,
+		Sum:           18.4 * float64(i+1),
+		Schema:        1,
+		PositiveSpans: []histogram.Span{
+			{Offset: 0, Length: 2},
+			{Offset: 1, Length: 2},
+		},
+		PositiveBuckets: []float64{float64(i + 1), float64(i + 2), float64(i + 1), float64(i + 1)},
+		NegativeSpans: []histogram.Span{
+			{Offset: 0, Length: 2},
+			{Offset: 1, Length: 2},
+		},
+		NegativeBuckets: []float64{float64(i + 1), float64(i + 2), float64(i + 1), float64(i + 1)},
+	}
+}
+
+func GenerateTestGaugeFloatHistograms(n int) (r []*histogram.FloatHistogram) {
+	for x := 0; x < n; x++ {
+		r = append(r, GenerateTestGaugeFloatHistogram(rand.Intn(n)))
+	}
+	return r
+}
+
+func GenerateTestGaugeFloatHistogram(i int) *histogram.FloatHistogram {
+	h := GenerateTestFloatHistogram(i)
+	h.CounterResetHint = histogram.GaugeType
+	return h
+}
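A short sketch of how a test might use the new tsdbutil generators (the surrounding appender plumbing is omitted; only the calls shown in the new file above are assumed):

    package main

    import (
        "fmt"

        "github.com/prometheus/prometheus/model/histogram"
        "github.com/prometheus/prometheus/tsdb/tsdbutil"
    )

    func main() {
        // Counter-style histograms: every histogram after the first carries NotCounterReset.
        hs := tsdbutil.GenerateTestHistograms(3)
        fmt.Println(hs[1].CounterResetHint == histogram.NotCounterReset) // true

        // Gauge histograms: the hint is always GaugeType; the contents are picked via rand.Intn(n).
        gs := tsdbutil.GenerateTestGaugeHistograms(3)
        fmt.Println(gs[0].CounterResetHint == histogram.GaugeType) // true
    }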

@@ -16,6 +16,7 @@ package strutil
 import (
 	"fmt"
 	"net/url"
+	"strings"
 
 	"github.com/grafana/regexp"
 )
@@ -38,6 +39,26 @@ func GraphLinkForExpression(expr string) string {
 
 // SanitizeLabelName replaces anything that doesn't match
 // client_label.LabelNameRE with an underscore.
+// Note: this does not handle all Prometheus label name restrictions (such as
+// not starting with a digit 0-9), and hence should only be used if the label
+// name is prefixed with a known valid string.
 func SanitizeLabelName(name string) string {
 	return invalidLabelCharRE.ReplaceAllString(name, "_")
 }
+
+// SanitizeFullLabelName replaces any invalid character with an underscore, and
+// if given an empty string, returns a string containing a single underscore.
+func SanitizeFullLabelName(name string) string {
+	if len(name) == 0 {
+		return "_"
+	}
+	var validSb strings.Builder
+	for i, b := range name {
+		if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) {
+			validSb.WriteRune('_')
+		} else {
+			validSb.WriteRune(b)
+		}
+	}
+	return validSb.String()
+}
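Behavior sketch for the new SanitizeFullLabelName, matching the test cases added below. Unlike SanitizeLabelName, it also replaces a leading digit and maps the empty string to a single underscore:

    package main

    import (
        "fmt"

        "github.com/prometheus/prometheus/util/strutil"
    )

    func main() {
        fmt.Println(strutil.SanitizeFullLabelName("fooClientLABEL"))      // fooClientLABEL
        fmt.Println(strutil.SanitizeFullLabelName("barClient.LABEL$$##")) // barClient_LABEL____
        fmt.Println(strutil.SanitizeFullLabelName("0zerothClient1LABEL")) // _zerothClient1LABEL
        fmt.Println(strutil.SanitizeFullLabelName(""))                    // _
    }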
@@ -59,3 +59,21 @@ func TestSanitizeLabelName(t *testing.T) {
 	expected = "barClient_LABEL____"
 	require.Equal(t, expected, actual, "SanitizeLabelName failed for label (%s)", expected)
 }
+
+func TestSanitizeFullLabelName(t *testing.T) {
+	actual := SanitizeFullLabelName("fooClientLABEL")
+	expected := "fooClientLABEL"
+	require.Equal(t, expected, actual, "SanitizeFullLabelName failed for label (%s)", expected)
+
+	actual = SanitizeFullLabelName("barClient.LABEL$$##")
+	expected = "barClient_LABEL____"
+	require.Equal(t, expected, actual, "SanitizeFullLabelName failed for label (%s)", expected)
+
+	actual = SanitizeFullLabelName("0zerothClient1LABEL")
+	expected = "_zerothClient1LABEL"
+	require.Equal(t, expected, actual, "SanitizeFullLabelName failed for label (%s)", expected)
+
+	actual = SanitizeFullLabelName("")
+	expected = "_"
+	require.Equal(t, expected, actual, "SanitizeFullLabelName failed for the empty label")
+}
@@ -59,6 +59,10 @@ type status string
 const (
 	statusSuccess status = "success"
 	statusError   status = "error"
+
+	// Non-standard status code (originally introduced by nginx) for the case when a client closes
+	// the connection while the server is still processing the request.
+	statusClientClosedConnection = 499
 )
 
 type errorType string
@@ -601,6 +605,10 @@ func returnAPIError(err error) *apiError {
 		return &apiError{errorInternal, err}
 	}
 
+	if errors.Is(err, context.Canceled) {
+		return &apiError{errorCanceled, err}
+	}
+
 	return &apiError{errorExec, err}
 }
 
@@ -1119,11 +1127,12 @@ type AlertDiscovery struct {
 
 // Alert has info for an alert.
 type Alert struct {
-	Labels      labels.Labels `json:"labels"`
-	Annotations labels.Labels `json:"annotations"`
-	State       string        `json:"state"`
-	ActiveAt    *time.Time    `json:"activeAt,omitempty"`
-	Value       string        `json:"value"`
+	Labels          labels.Labels `json:"labels"`
+	Annotations     labels.Labels `json:"annotations"`
+	State           string        `json:"state"`
+	ActiveAt        *time.Time    `json:"activeAt,omitempty"`
+	KeepFiringSince *time.Time    `json:"keepFiringSince,omitempty"`
+	Value           string        `json:"value"`
 }
 
 func (api *API) alerts(r *http.Request) apiFuncResult {
@@ -1152,6 +1161,9 @@ func rulesAlertsToAPIAlerts(rulesAlerts []*rules.Alert) []*Alert {
 			ActiveAt:    &ruleAlert.ActiveAt,
 			Value:       strconv.FormatFloat(ruleAlert.Value, 'e', -1, 64),
 		}
+		if !ruleAlert.KeepFiringSince.IsZero() {
+			apiAlerts[i].KeepFiringSince = &ruleAlert.KeepFiringSince
+		}
 	}
 
 	return apiAlerts
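For context, a sketch of the JSON an alert in its keep_firing_for window would serialize to, using a local mirror of the API's Alert struct (illustrative only; field values below are made up):

    package main

    import (
        "encoding/json"
        "fmt"
        "time"
    )

    type alert struct {
        Labels          map[string]string `json:"labels"`
        Annotations     map[string]string `json:"annotations"`
        State           string            `json:"state"`
        ActiveAt        *time.Time        `json:"activeAt,omitempty"`
        KeepFiringSince *time.Time        `json:"keepFiringSince,omitempty"`
        Value           string            `json:"value"`
    }

    func main() {
        now := time.Date(2023, 2, 1, 12, 0, 0, 0, time.UTC)
        a := alert{
            Labels:          map[string]string{"alertname": "HighLatency"},
            Annotations:     map[string]string{},
            State:           "firing",
            ActiveAt:        &now,
            KeepFiringSince: &now, // only set while the rule's keep_firing_for window is active
            Value:           "1e+00",
        }
        out, _ := json.Marshal(a)
        fmt.Println(string(out))
    }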
@@ -1249,6 +1261,7 @@ type AlertingRule struct {
 	Name          string        `json:"name"`
 	Query         string        `json:"query"`
 	Duration      float64       `json:"duration"`
+	KeepFiringFor float64       `json:"keepFiringFor"`
 	Labels        labels.Labels `json:"labels"`
 	Annotations   labels.Labels `json:"annotations"`
 	Alerts        []*Alert      `json:"alerts"`
@@ -1311,6 +1324,7 @@ func (api *API) rules(r *http.Request) apiFuncResult {
 				Name:          rule.Name(),
 				Query:         rule.Query().String(),
 				Duration:      rule.HoldDuration().Seconds(),
+				KeepFiringFor: rule.KeepFiringFor().Seconds(),
 				Labels:        rule.Labels(),
 				Annotations:   rule.Annotations(),
 				Alerts:        rulesAlertsToAPIAlerts(rule.ActiveAlerts()),
@@ -1626,7 +1640,9 @@ func (api *API) respondError(w http.ResponseWriter, apiErr *apiError, data inter
 		code = http.StatusBadRequest
 	case errorExec:
 		code = http.StatusUnprocessableEntity
-	case errorCanceled, errorTimeout:
+	case errorCanceled:
+		code = statusClientClosedConnection
+	case errorTimeout:
 		code = http.StatusServiceUnavailable
 	case errorInternal:
 		code = http.StatusInternalServerError
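The hunk above splits canceled queries from timeouts: cancellations now map to nginx's non-standard 499, while timeouts keep 503. A minimal stand-alone sketch of the resulting mapping (local types; the real logic lives in respondError above):

    package main

    import (
        "fmt"
        "net/http"
    )

    type errorType string

    const (
        errorCanceled errorType = "canceled"
        errorTimeout  errorType = "timeout"

        statusClientClosedConnection = 499 // non-standard, originally from nginx
    )

    func statusFor(t errorType) int {
        switch t {
        case errorCanceled:
            return statusClientClosedConnection
        case errorTimeout:
            return http.StatusServiceUnavailable
        default:
            return http.StatusInternalServerError
        }
    }

    func main() {
        fmt.Println(statusFor(errorCanceled)) // 499
        fmt.Println(statusFor(errorTimeout))  // 503
    }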
@@ -207,6 +207,7 @@ func (m rulesRetrieverMock) AlertingRules() []*rules.AlertingRule {
 		"test_metric3",
 		expr1,
 		time.Second,
+		0,
 		labels.Labels{},
 		labels.Labels{},
 		labels.Labels{},
@@ -218,6 +219,7 @@ func (m rulesRetrieverMock) AlertingRules() []*rules.AlertingRule {
 		"test_metric4",
 		expr2,
 		time.Second,
+		0,
 		labels.Labels{},
 		labels.Labels{},
 		labels.Labels{},

@@ -58,7 +58,7 @@ func TestApiStatusCodes(t *testing.T) {
 		"promql.ErrQueryCanceled": {
 			err:            promql.ErrQueryCanceled("some error"),
 			expectedString: "query was canceled",
-			expectedCode:   http.StatusServiceUnavailable,
+			expectedCode:   statusClientClosedConnection,
 		},
 
 		"promql.ErrQueryTimeout": {
@@ -76,7 +76,7 @@ func TestApiStatusCodes(t *testing.T) {
 		"context.Canceled": {
 			err:            context.Canceled,
 			expectedString: "context canceled",
-			expectedCode:   http.StatusUnprocessableEntity,
+			expectedCode:   statusClientClosedConnection,
 		},
 	} {
 		for k, q := range map[string]storage.SampleAndChunkQueryable{

@@ -1,6 +1,6 @@
 {
   "name": "@prometheus-io/codemirror-promql",
-  "version": "0.41.0-rc.0",
+  "version": "0.42.0",
   "description": "a CodeMirror mode for the PromQL language",
   "types": "dist/esm/index.d.ts",
   "module": "dist/esm/index.js",
@@ -29,24 +29,24 @@
   },
   "homepage": "https://github.com/prometheus/prometheus/blob/main/web/ui/module/codemirror-promql/README.md",
   "dependencies": {
-    "@prometheus-io/lezer-promql": "^0.41.0-rc.0",
+    "@prometheus-io/lezer-promql": "^0.42.0",
     "lru-cache": "^6.0.0"
   },
   "devDependencies": {
-    "@codemirror/autocomplete": "^6.2.0",
-    "@codemirror/language": "^6.3.0",
-    "@codemirror/lint": "^6.0.0",
-    "@codemirror/state": "^6.1.1",
-    "@codemirror/view": "^6.4.0",
-    "@lezer/common": "^1.0.1",
-    "@lezer/lr": "^1.2.3",
-    "@lezer/highlight": "^1.1.2",
+    "@codemirror/autocomplete": "^6.4.0",
+    "@codemirror/language": "^6.4.0",
+    "@codemirror/lint": "^6.1.0",
+    "@codemirror/state": "^6.2.0",
+    "@codemirror/view": "^6.7.3",
+    "@lezer/common": "^1.0.2",
+    "@lezer/lr": "^1.3.1",
+    "@lezer/highlight": "^1.1.3",
    "@types/lru-cache": "^5.1.1",
     "isomorphic-fetch": "^3.0.0",
-    "nock": "^13.2.9"
+    "nock": "^13.3.0"
   },
   "peerDependencies": {
-    "@codemirror/autocomplete": "^6.2.0",
+    "@codemirror/autocomplete": "^6.4.0",
     "@codemirror/language": "^6.3.0",
     "@codemirror/lint": "^6.0.0",
     "@codemirror/state": "^6.1.1",

@@ -1,8 +1,8 @@
 {
   "name": "@prometheus-io/lezer-promql",
-  "version": "0.41.0-rc.0",
+  "version": "0.42.0",
   "description": "lezer-based PromQL grammar",
-  "main": "index.cjs",
+  "main": "dist/index.cjs",
   "type": "module",
   "exports": {
     "import": "./dist/index.es.js",
@@ -30,9 +30,9 @@
     "test": "NODE_OPTIONS=--experimental-vm-modules jest"
   },
   "devDependencies": {
-    "@lezer/generator": "^1.1.1",
-    "@lezer/lr": "^1.2.3",
-    "@lezer/highlight": "^1.1.2"
+    "@lezer/generator": "^1.2.2",
+    "@lezer/lr": "^1.3.1",
+    "@lezer/highlight": "^1.1.3"
   },
   "peerDependencies": {
     "@lezer/lr": "^1.2.3",

860	web/ui/package-lock.json	(generated)
File diff suppressed because it is too large.

@@ -16,16 +16,16 @@
     "npm": ">=7.0.0"
   },
   "devDependencies": {
-    "@types/jest": "^29.2.2",
+    "@types/jest": "^29.4.0",
     "@types/node": "^17.0.45",
-    "eslint-config-prettier": "^8.5.0",
+    "eslint-config-prettier": "^8.6.0",
     "eslint-config-react-app": "^7.0.1",
     "eslint-plugin-prettier": "^4.2.1",
     "jest-canvas-mock": "^2.4.0",
     "jest-fetch-mock": "^3.0.3",
     "react-scripts": "^5.0.1",
-    "prettier": "^2.7.1",
-    "ts-jest": "^29.0.3",
-    "typescript": "^4.8.3"
+    "prettier": "^2.8.3",
+    "ts-jest": "^29.0.5",
+    "typescript": "^4.9.4"
   }
 }

@@ -1,44 +1,44 @@
 {
   "name": "@prometheus-io/app",
-  "version": "0.41.0-rc.0",
+  "version": "0.42.0",
   "private": true,
   "dependencies": {
-    "@codemirror/autocomplete": "^6.2.0",
-    "@codemirror/commands": "^6.1.2",
-    "@codemirror/language": "^6.3.0",
-    "@codemirror/lint": "^6.0.0",
-    "@codemirror/search": "^6.2.2",
-    "@codemirror/state": "^6.1.1",
-    "@codemirror/view": "^6.4.0",
+    "@codemirror/autocomplete": "^6.4.0",
+    "@codemirror/commands": "^6.2.0",
+    "@codemirror/language": "^6.4.0",
+    "@codemirror/lint": "^6.1.0",
+    "@codemirror/search": "^6.2.3",
+    "@codemirror/state": "^6.2.0",
+    "@codemirror/view": "^6.7.3",
     "@forevolve/bootstrap-dark": "^2.1.1",
-    "@fortawesome/fontawesome-svg-core": "6.2.0",
-    "@fortawesome/free-solid-svg-icons": "6.2.0",
+    "@fortawesome/fontawesome-svg-core": "6.2.1",
+    "@fortawesome/free-solid-svg-icons": "6.2.1",
     "@fortawesome/react-fontawesome": "0.2.0",
-    "@lezer/lr": "^1.2.3",
-    "@lezer/highlight": "^1.1.2",
-    "@lezer/common": "^1.0.1",
+    "@lezer/lr": "^1.3.1",
+    "@lezer/highlight": "^1.1.3",
+    "@lezer/common": "^1.0.2",
     "@nexucis/fuzzy": "^0.4.1",
     "@nexucis/kvsearch": "^0.8.1",
-    "@prometheus-io/codemirror-promql": "^0.41.0-rc.0",
+    "@prometheus-io/codemirror-promql": "^0.42.0",
     "bootstrap": "^4.6.2",
     "css.escape": "^1.5.1",
-    "downshift": "^7.0.1",
+    "downshift": "^7.2.0",
     "http-proxy-middleware": "^2.0.6",
-    "jquery": "^3.6.1",
+    "jquery": "^3.6.3",
     "jquery.flot.tooltip": "^0.9.0",
     "moment": "^2.29.4",
-    "moment-timezone": "^0.5.38",
+    "moment-timezone": "^0.5.40",
     "popper.js": "^1.14.3",
     "react": "^17.0.2",
     "react-copy-to-clipboard": "^5.1.0",
     "react-dom": "^17.0.2",
     "react-infinite-scroll-component": "^6.1.0",
     "react-resize-detector": "^7.1.2",
-    "react-router-dom": "^5.3.3",
+    "react-router-dom": "^5.3.4",
     "react-test-renderer": "^17.0.2",
     "reactstrap": "^8.10.1",
-    "sanitize-html": "^2.7.3",
-    "sass": "1.56.0",
+    "sanitize-html": "^2.8.1",
+    "sass": "1.57.1",
     "tempusdominus-bootstrap-4": "^5.39.2",
     "tempusdominus-core": "^5.19.3"
   },
@@ -68,18 +68,18 @@
     "@testing-library/react-hooks": "^7.0.2",
     "@types/enzyme": "^3.10.12",
     "@types/flot": "0.0.32",
-    "@types/jquery": "^3.5.14",
-    "@types/react": "^17.0.50",
+    "@types/jquery": "^3.5.16",
+    "@types/react": "^17.0.53",
     "@types/react-copy-to-clipboard": "^5.0.4",
-    "@types/react-dom": "^17.0.17",
+    "@types/react-dom": "^17.0.18",
     "@types/react-router-dom": "^5.3.3",
-    "@types/sanitize-html": "^2.6.2",
+    "@types/sanitize-html": "^2.8.0",
     "@types/sinon": "^10.0.13",
-    "@wojtekmaj/enzyme-adapter-react-17": "^0.6.7",
+    "@wojtekmaj/enzyme-adapter-react-17": "^0.8.0",
     "enzyme": "^3.11.0",
     "enzyme-to-json": "^3.6.2",
     "mutationobserver-shim": "^0.3.7",
-    "sinon": "^14.0.1"
+    "sinon": "^14.0.2"
   },
   "jest": {
     "snapshotSerializers": [

@@ -29,6 +29,7 @@ export interface Alert {
   value: string;
   annotations: Record<string, string>;
   activeAt: string;
+  keepFiringSince: string;
 }
 
 interface RuleGroup {

@@ -43,6 +43,11 @@ const CollapsibleAlertPanel: FC<CollapsibleAlertPanelProps> = ({ rule, showAnnot
               <div>for: {formatDuration(rule.duration * 1000)}</div>
             </div>
           )}
+          {rule.keepFiringFor > 0 && (
+            <div>
+              <div>keep_firing_for: {formatDuration(rule.keepFiringFor * 1000)}</div>
+            </div>
+          )}
           {rule.labels && Object.keys(rule.labels).length > 0 && (
             <div>
               <div>labels:</div>
@@ -91,9 +96,14 @@ const CollapsibleAlertPanel: FC<CollapsibleAlertPanelProps> = ({ rule, showAnnot
                     </td>
                     <td>
                       <h5 className="m-0">
-                        <Badge color={alertColors[alert.state] + ' text-uppercase'} className="px-3">
+                        <Badge color={alertColors[alert.state] + ' text-uppercase'} className="px-3 mr-1">
                           {alert.state}
                         </Badge>
+                        {alert.keepFiringSince && (
+                          <Badge color="secondary" className="px-3">
+                            Stabilizing
+                          </Badge>
+                        )}
                       </h5>
                     </td>
                     <td>{alert.activeAt}</td>

@@ -96,6 +96,11 @@ export const RulesContent: FC<RulesContentProps> = ({ response }) => {
                     <strong>for:</strong> {formatDuration(r.duration * 1000)}
                   </div>
                 )}
+                {r.keepFiringFor > 0 && (
+                  <div>
+                    <strong>keep_firing_for:</strong> {formatDuration(r.keepFiringFor * 1000)}
+                  </div>
+                )}
                 {r.labels && Object.keys(r.labels).length > 0 && (
                   <div>
                     <strong>labels:</strong>

@@ -26,6 +26,7 @@ export type Rule = {
   alerts: Alert[];
   annotations: Record<string, string>;
   duration: number;
+  keepFiringFor: number;
   evaluationTime: string;
   health: string;
   labels: Record<string, string>;