Mirror of https://github.com/prometheus/prometheus.git (synced 2025-03-05 20:59:13 -08:00)

Merge pull request #508 from grafana/sync-upstream-2023-06-30: Sync upstream 2023-06-30

Commit 0ad967ebd3
@@ -13,8 +13,9 @@ linters:
   - gocritic
   - gofumpt
   - goimports
-  - revive
   - misspell
   - predeclared
+  - revive
   - unconvert
   - unused

@@ -30,14 +31,19 @@ issues:

 linters-settings:
   depguard:
-    list-type: blacklist
-    include-go-root: true
-    packages-with-error-message:
-      - sync/atomic: "Use go.uber.org/atomic instead of sync/atomic"
-      - github.com/stretchr/testify/assert: "Use github.com/stretchr/testify/require instead of github.com/stretchr/testify/assert"
-      - github.com/go-kit/kit/log: "Use github.com/go-kit/log instead of github.com/go-kit/kit/log"
-      - io/ioutil: "Use corresponding 'os' or 'io' functions instead."
-      - regexp: "Use github.com/grafana/regexp instead of regexp"
+    rules:
+      main:
+        deny:
+          - pkg: "sync/atomic"
+            desc: "Use go.uber.org/atomic instead of sync/atomic"
+          - pkg: "github.com/stretchr/testify/assert"
+            desc: "Use github.com/stretchr/testify/require instead of github.com/stretchr/testify/assert"
+          - pkg: "github.com/go-kit/kit/log"
+            desc: "Use github.com/go-kit/log instead of github.com/go-kit/kit/log"
+          - pkg: "io/ioutil"
+            desc: "Use corresponding 'os' or 'io' functions instead."
+          - pkg: "regexp"
+            desc: "Use github.com/grafana/regexp instead of regexp"
   errcheck:
     exclude-functions:
       # Don't flag lines such as "io.Copy(io.Discard, resp.Body)".

@@ -55,3 +61,9 @@ linters-settings:
     local-prefixes: github.com/prometheus/prometheus
   gofumpt:
     extra-rules: true
+  revive:
+    rules:
+      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unused-parameter
+      - name: unused-parameter
+        severity: warning
+        disabled: true

@@ -20,5 +20,4 @@ rules:
       config/testdata/section_key_dup.bad.yml
   line-length: disable
   truthy:
-    ignore: |
-      .github/workflows/*.yml
+    check-keys: false
CHANGELOG.md (26 lines changed)
@@ -1,5 +1,29 @@
 # Changelog

+## 2.45.0 / 2023-06-23
+
+This release is a LTS (Long-Term Support) release of Prometheus and will
+receive security, documentation and bugfix patches for at least 12 months.
+Please read more about our LTS release cycle at
+<https://prometheus.io/docs/introduction/release-cycle/>.
+
+* [FEATURE] API: New limit parameter to limit the number of items returned by `/api/v1/status/tsdb` endpoint. #12336
+* [FEATURE] Config: Add limits to global config. #12126
+* [FEATURE] Consul SD: Added support for `path_prefix`. #12372
+* [FEATURE] Native histograms: Add option to scrape both classic and native histograms. #12350
+* [FEATURE] Native histograms: Added support for two more arithmetic operators `avg_over_time` and `sum_over_time`. #12262
+* [FEATURE] Promtool: When providing the block id, only one block will be loaded and analyzed. #12031
+* [FEATURE] Remote-write: New Azure ad configuration to support remote writing directly to Azure Monitor workspace. #11944
+* [FEATURE] TSDB: Samples per chunk are now configurable with flag `storage.tsdb.samples-per-chunk`. By default set to its former value 120. #12055
+* [ENHANCEMENT] Native histograms: bucket size can now be limited to avoid scrape fails. #12254
+* [ENHANCEMENT] TSDB: Dropped series are now deleted from the WAL sooner. #12297
+* [BUGFIX] Native histograms: ChunkSeries iterator now checks if a new sample can be appended to the open chunk. #12185
+* [BUGFIX] Native histograms: Fix Histogram Appender `Appendable()` segfault. #12357
+* [BUGFIX] Native histograms: Fix setting reset header to gauge histograms in seriesToChunkEncoder. #12329
+* [BUGFIX] TSDB: Tombstone intervals are not modified after Get() call. #12245
+* [BUGFIX] TSDB: Use path/filepath to set the WAL directory. #12349
+
 ## 2.44.0 / 2023-05-13

 This version is built with Go tag `stringlabels`, to use the smaller data

@@ -39,7 +63,7 @@ improvements for testing. #10991
 * [FEATURE] Promtool: Add HTTP client configuration to query commands. #11487
 * [FEATURE] Scrape: Add `scrape_config_files` to include scrape configs from different files. #12019
 * [FEATURE] HTTP client: Add `no_proxy` to exclude URLs from proxied requests. #12098
-* [FEATURE] HTTP client: Add `proxy_from_enviroment` to read proxies from env variables. #12098
+* [FEATURE] HTTP client: Add `proxy_from_environment` to read proxies from env variables. #12098
 * [ENHANCEMENT] API: Add support for setting lookback delta per query via the API. #12088
 * [ENHANCEMENT] API: Change HTTP status code from 503/422 to 499 if a request is canceled. #11897
 * [ENHANCEMENT] Scrape: Allow exemplars for all metric types. #11984
Makefile (2 lines changed)
@@ -82,7 +82,7 @@ assets-tarball: assets
 .PHONY: parser
 parser:
 	@echo ">> running goyacc to generate the .go file."
-ifeq (, $(shell which goyacc))
+ifeq (, $(shell command -v goyacc > /dev/null))
 	@echo "goyacc not installed so skipping"
 	@echo "To install: go install golang.org/x/tools/cmd/goyacc@v0.6.0"
 else

@@ -49,7 +49,7 @@ endif
 GOTEST := $(GO) test
 GOTEST_DIR :=
 ifneq ($(CIRCLE_JOB),)
-ifneq ($(shell which gotestsum),)
+ifneq ($(shell command -v gotestsum > /dev/null),)
 	GOTEST_DIR := test-results
 	GOTEST := gotestsum --junitfile $(GOTEST_DIR)/unit-tests.xml --
 endif

@@ -61,7 +61,7 @@ PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_
 SKIP_GOLANGCI_LINT :=
 GOLANGCI_LINT :=
 GOLANGCI_LINT_OPTS ?=
-GOLANGCI_LINT_VERSION ?= v1.51.2
+GOLANGCI_LINT_VERSION ?= v1.53.3
 # golangci-lint only supports linux, darwin and windows platforms on i386/amd64.
 # windows isn't included here because of the path separator being different.
 ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))

@@ -180,7 +180,7 @@ endif
 .PHONY: common-yamllint
 common-yamllint:
 	@echo ">> running yamllint on all YAML files in the repository"
-ifeq (, $(shell which yamllint))
+ifeq (, $(shell command -v yamllint > /dev/null))
 	@echo "yamllint not installed so skipping"
 else
 	yamllint .
@@ -34,7 +34,7 @@ The features that distinguish Prometheus from other metrics and monitoring syste

 ## Architecture overview

-![](documentation/images/architecture.svg)
+![](https://cdn.jsdelivr.net/gh/prometheus/prometheus@c34257d069c630685da35bcef084632ffd5d6209/documentation/images/architecture.svg)

 ## Install

@@ -49,8 +49,10 @@ Release cadence of first pre-releases being cut is 6 weeks.
 | v2.42 | 2023-01-25 | Kemal Akkoyun (GitHub: @kakkoyun) |
 | v2.43 | 2023-03-08 | Julien Pivotto (GitHub: @roidelapluie) |
 | v2.44 | 2023-04-19 | Bryan Boreham (GitHub: @bboreham) |
-| v2.45 | 2023-05-31 | Jesus Vazquez (Github: @jesusvazquez) |
-| v2.46 | 2023-07-12 | **searching for volunteer** |
+| v2.45 LTS | 2023-05-31 | Jesus Vazquez (Github: @jesusvazquez) |
+| v2.46 | 2023-07-12 | Julien Pivotto (GitHub: @roidelapluie) |
+| v2.47 | 2023-08-23 | **searching for volunteer** |
+| v2.48 | 2023-10-04 | **searching for volunteer** |

 If you are interested in volunteering please create a pull request against the [prometheus/prometheus](https://github.com/prometheus/prometheus) repository and propose yourself for the release series of your choice.
@@ -429,7 +429,7 @@ func main() {

 	_, err := a.Parse(os.Args[1:])
 	if err != nil {
-		fmt.Fprintln(os.Stderr, fmt.Errorf("Error parsing commandline arguments: %w", err))
+		fmt.Fprintln(os.Stderr, fmt.Errorf("Error parsing command line arguments: %w", err))
 		a.Usage(os.Args[1:])
 		os.Exit(2)
 	}
@@ -81,6 +81,7 @@ func main() {
 	var (
 		httpRoundTripper   = api.DefaultRoundTripper
 		serverURL          *url.URL
+		remoteWriteURL     *url.URL
 		httpConfigFilePath string
 	)

@@ -126,8 +127,8 @@ func main() {
 	checkRulesCmd := checkCmd.Command("rules", "Check if the rule files are valid or not.")
 	ruleFiles := checkRulesCmd.Arg(
 		"rule-files",
-		"The rule files to check.",
-	).Required().ExistingFiles()
+		"The rule files to check, default is read from standard input.",
+	).ExistingFiles()
 	checkRulesLint := checkRulesCmd.Flag(
 		"lint",
 		"Linting checks to apply. Available options are: "+strings.Join(lintOptions, ", ")+". Use --lint=none to disable linting",

@@ -178,6 +179,18 @@ func main() {
 	queryLabelsEnd := queryLabelsCmd.Flag("end", "End time (RFC3339 or Unix timestamp).").String()
 	queryLabelsMatch := queryLabelsCmd.Flag("match", "Series selector. Can be specified multiple times.").Strings()

+	pushCmd := app.Command("push", "Push to a Prometheus server.")
+	pushCmd.Flag("http.config.file", "HTTP client configuration file for promtool to connect to Prometheus.").PlaceHolder("<filename>").ExistingFileVar(&httpConfigFilePath)
+	pushMetricsCmd := pushCmd.Command("metrics", "Push metrics to a prometheus remote write (for testing purpose only).")
+	pushMetricsCmd.Arg("remote-write-url", "Prometheus remote write url to push metrics.").Required().URLVar(&remoteWriteURL)
+	metricFiles := pushMetricsCmd.Arg(
+		"metric-files",
+		"The metric files to push, default is read from standard input.",
+	).ExistingFiles()
+	pushMetricsLabels := pushMetricsCmd.Flag("label", "Label to attach to metrics. Can be specified multiple times.").Default("job=promtool").StringMap()
+	pushMetricsTimeout := pushMetricsCmd.Flag("timeout", "The time to wait for pushing metrics.").Default("30s").Duration()
+	pushMetricsHeaders := pushMetricsCmd.Flag("header", "Prometheus remote write header.").StringMap()
+
 	testCmd := app.Command("test", "Unit testing.")
 	testRulesCmd := testCmd.Command("rules", "Unit tests for rules.")
 	testRulesFiles := testRulesCmd.Arg(

@@ -301,6 +314,9 @@ func main() {
 	case checkMetricsCmd.FullCommand():
 		os.Exit(CheckMetrics(*checkMetricsExtended))

+	case pushMetricsCmd.FullCommand():
+		os.Exit(PushMetrics(remoteWriteURL, httpRoundTripper, *pushMetricsHeaders, *pushMetricsTimeout, *pushMetricsLabels, *metricFiles...))
+
 	case queryInstantCmd.FullCommand():
 		os.Exit(QueryInstant(serverURL, httpRoundTripper, *queryInstantExpr, *queryInstantTime, p))

@@ -441,20 +457,12 @@ func CheckConfig(agentMode, checkSyntaxOnly bool, lintSettings lintConfig, files
 		}
 		fmt.Println()

-		for _, rf := range ruleFiles {
-			if n, errs := checkRules(rf, lintSettings); len(errs) > 0 {
-				fmt.Fprintln(os.Stderr, "  FAILED:")
-				for _, err := range errs {
-					fmt.Fprintln(os.Stderr, "    ", err)
-				}
-				failed = true
-				for _, err := range errs {
-					hasErrors = hasErrors || !errors.Is(err, lintError)
-				}
-			} else {
-				fmt.Printf("  SUCCESS: %d rules found\n", n)
-			}
-			fmt.Println()
+		rulesFailed, rulesHasErrors := checkRules(ruleFiles, lintSettings)
+		if rulesFailed {
+			failed = rulesFailed
+		}
+		if rulesHasErrors {
+			hasErrors = rulesHasErrors
 		}
 	}
 	if failed && hasErrors {

@@ -682,9 +690,57 @@ func checkSDFile(filename string) ([]*targetgroup.Group, error)
 func CheckRules(ls lintConfig, files ...string) int {
 	failed := false
 	hasErrors := false
+	if len(files) == 0 {
+		fmt.Println("Checking standard input")
+		data, err := io.ReadAll(os.Stdin)
+		if err != nil {
+			fmt.Fprintln(os.Stderr, "  FAILED:", err)
+			return failureExitCode
+		}
+		rgs, errs := rulefmt.Parse(data)
+		for _, e := range errs {
+			fmt.Fprintln(os.Stderr, e.Error())
+			return failureExitCode
+		}
+		if n, errs := checkRuleGroups(rgs, ls); errs != nil {
+			fmt.Fprintln(os.Stderr, "  FAILED:")
+			for _, e := range errs {
+				fmt.Fprintln(os.Stderr, e.Error())
+			}
+			failed = true
+			for _, err := range errs {
+				hasErrors = hasErrors || !errors.Is(err, lintError)
+			}
+		} else {
+			fmt.Printf("  SUCCESS: %d rules found\n", n)
+		}
+		fmt.Println()
+	} else {
+		failed, hasErrors = checkRules(files, ls)
+	}
+
+	if failed && hasErrors {
+		return failureExitCode
+	}
+	if failed && ls.fatal {
+		return lintErrExitCode
+	}
+
+	return successExitCode
+}
+
+// checkRules validates rule files.
+func checkRules(files []string, ls lintConfig) (bool, bool) {
+	failed := false
+	hasErrors := false
 	for _, f := range files {
-		if n, errs := checkRules(f, ls); errs != nil {
+		fmt.Println("Checking", f)
+		rgs, errs := rulefmt.ParseFile(f)
+		if errs != nil {
+			failed = true
+			continue
+		}
+		if n, errs := checkRuleGroups(rgs, ls); errs != nil {
 			fmt.Fprintln(os.Stderr, "  FAILED:")
 			for _, e := range errs {
 				fmt.Fprintln(os.Stderr, e.Error())

@@ -698,23 +754,10 @@ func CheckRules(ls lintConfig, files ...string) int {
 			}
 			fmt.Println()
 		}
-	if failed && hasErrors {
-		return failureExitCode
-	}
-	if failed && ls.fatal {
-		return lintErrExitCode
-	}
-	return successExitCode
+	return failed, hasErrors
 }

-func checkRules(filename string, lintSettings lintConfig) (int, []error) {
-	fmt.Println("Checking", filename)
-
-	rgs, errs := rulefmt.ParseFile(filename)
-	if errs != nil {
-		return successExitCode, errs
-	}
-
+func checkRuleGroups(rgs *rulefmt.RuleGroups, lintSettings lintConfig) (int, []error) {
 	numRules := 0
 	for _, rg := range rgs.Groups {
 		numRules += len(rg.Rules)
cmd/promtool/metrics.go (new file, 138 lines)
@@ -0,0 +1,138 @@
+// Copyright 2023 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package main
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"os"
+	"time"
+
+	"github.com/golang/snappy"
+	config_util "github.com/prometheus/common/config"
+	"github.com/prometheus/common/model"
+
+	"github.com/prometheus/prometheus/storage/remote"
+	"github.com/prometheus/prometheus/util/fmtutil"
+)
+
+// Push metrics to a prometheus remote write (for testing purpose only).
+func PushMetrics(url *url.URL, roundTripper http.RoundTripper, headers map[string]string, timeout time.Duration, labels map[string]string, files ...string) int {
+	addressURL, err := url.Parse(url.String())
+	if err != nil {
+		fmt.Fprintln(os.Stderr, err)
+		return failureExitCode
+	}
+
+	// build remote write client
+	writeClient, err := remote.NewWriteClient("remote-write", &remote.ClientConfig{
+		URL:     &config_util.URL{URL: addressURL},
+		Timeout: model.Duration(timeout),
+	})
+	if err != nil {
+		fmt.Fprintln(os.Stderr, err)
+		return failureExitCode
+	}
+
+	// set custom tls config from httpConfigFilePath
+	// set custom headers to every request
+	client, ok := writeClient.(*remote.Client)
+	if !ok {
+		fmt.Fprintln(os.Stderr, fmt.Errorf("unexpected type %T", writeClient))
+		return failureExitCode
+	}
+	client.Client.Transport = &setHeadersTransport{
+		RoundTripper: roundTripper,
+		headers:      headers,
+	}
+
+	var data []byte
+	var failed bool
+
+	if len(files) == 0 {
+		data, err = io.ReadAll(os.Stdin)
+		if err != nil {
+			fmt.Fprintln(os.Stderr, "  FAILED:", err)
+			return failureExitCode
+		}
+		fmt.Printf("Parsing standard input\n")
+		if parseAndPushMetrics(client, data, labels) {
+			fmt.Printf("  SUCCESS: metrics pushed to remote write.\n")
+			return successExitCode
+		}
+		return failureExitCode
+	}
+
+	for _, file := range files {
+		data, err = os.ReadFile(file)
+		if err != nil {
+			fmt.Fprintln(os.Stderr, "  FAILED:", err)
+			failed = true
+			continue
+		}
+
+		fmt.Printf("Parsing metrics file %s\n", file)
+		if parseAndPushMetrics(client, data, labels) {
+			fmt.Printf("  SUCCESS: metrics file %s pushed to remote write.\n", file)
+			continue
+		}
+		failed = true
+	}
+
+	if failed {
+		return failureExitCode
+	}
+
+	return successExitCode
+}
+
+func parseAndPushMetrics(client *remote.Client, data []byte, labels map[string]string) bool {
+	metricsData, err := fmtutil.MetricTextToWriteRequest(bytes.NewReader(data), labels)
+	if err != nil {
+		fmt.Fprintln(os.Stderr, "  FAILED:", err)
+		return false
+	}
+
+	raw, err := metricsData.Marshal()
+	if err != nil {
+		fmt.Fprintln(os.Stderr, "  FAILED:", err)
+		return false
+	}
+
+	// Encode the request body into snappy encoding.
+	compressed := snappy.Encode(nil, raw)
+	err = client.Store(context.Background(), compressed)
+	if err != nil {
+		fmt.Fprintln(os.Stderr, "  FAILED:", err)
+		return false
+	}
+
+	return true
+}
+
+type setHeadersTransport struct {
+	http.RoundTripper
+	headers map[string]string
+}
+
+func (s *setHeadersTransport) RoundTrip(req *http.Request) (*http.Response, error) {
+	for key, value := range s.headers {
+		req.Header.Set(key, value)
+	}
+	return s.RoundTripper.RoundTrip(req)
+}
@@ -398,26 +398,20 @@ func openBlock(path, blockID string) (*tsdb.DBReadOnly, tsdb.BlockReader, error)
 	if err != nil {
 		return nil, nil, err
 	}
-	blocks, err := db.Blocks()
-	if err != nil {
-		return nil, nil, err
-	}
-	var block tsdb.BlockReader
-	switch {
-	case blockID != "":
-		for _, b := range blocks {
-			if b.Meta().ULID.String() == blockID {
-				block = b
-				break
-			}
-		}
-	case len(blocks) > 0:
-		block = blocks[len(blocks)-1]
-	}
-	if block == nil {
-		return nil, nil, fmt.Errorf("block %s not found", blockID)
-	}
-	return db, block, nil
+
+	if blockID == "" {
+		blockID, err = db.LastBlockID()
+		if err != nil {
+			return nil, nil, err
+		}
+	}
+
+	b, err := db.Block(blockID)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	return db, b, nil
 }

 func analyzeBlock(path, blockID string, limit int, runExtended bool) error {
@@ -34,6 +34,7 @@ import (
 	"github.com/prometheus/prometheus/discovery"
 	"github.com/prometheus/prometheus/model/labels"
 	"github.com/prometheus/prometheus/model/relabel"
+	"github.com/prometheus/prometheus/storage/remote/azuread"
 )

 var (

@@ -267,7 +268,7 @@ func (c *Config) GetScrapeConfigs() ([]*ScrapeConfig, error) {
 	for i, scfg := range c.ScrapeConfigs {
 		// We do these checks for library users that would not call Validate in
 		// Unmarshal.
-		if err := scfg.Validate(c.GlobalConfig.ScrapeInterval, c.GlobalConfig.ScrapeTimeout); err != nil {
+		if err := scfg.Validate(c.GlobalConfig); err != nil {
 			return nil, err
 		}

@@ -294,7 +295,7 @@ func (c *Config) GetScrapeConfigs() ([]*ScrapeConfig, error) {
 			return nil, fileErr(filename, err)
 		}
 		for _, scfg := range cfg.ScrapeConfigs {
-			if err := scfg.Validate(c.GlobalConfig.ScrapeInterval, c.GlobalConfig.ScrapeTimeout); err != nil {
+			if err := scfg.Validate(c.GlobalConfig); err != nil {
 				return nil, fileErr(filename, err)
 			}

@@ -343,7 +344,7 @@ func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error {
 	// Do global overrides and validate unique names.
 	jobNames := map[string]struct{}{}
 	for _, scfg := range c.ScrapeConfigs {
-		if err := scfg.Validate(c.GlobalConfig.ScrapeInterval, c.GlobalConfig.ScrapeTimeout); err != nil {
+		if err := scfg.Validate(c.GlobalConfig); err != nil {
 			return err
 		}

@@ -390,6 +391,24 @@ type GlobalConfig struct {
 	QueryLogFile string `yaml:"query_log_file,omitempty"`
 	// The labels to add to any timeseries that this Prometheus instance scrapes.
 	ExternalLabels labels.Labels `yaml:"external_labels,omitempty"`
+	// An uncompressed response body larger than this many bytes will cause the
+	// scrape to fail. 0 means no limit.
+	BodySizeLimit units.Base2Bytes `yaml:"body_size_limit,omitempty"`
+	// More than this many samples post metric-relabeling will cause the scrape to
+	// fail. 0 means no limit.
+	SampleLimit uint `yaml:"sample_limit,omitempty"`
+	// More than this many targets after the target relabeling will cause the
+	// scrapes to fail. 0 means no limit.
+	TargetLimit uint `yaml:"target_limit,omitempty"`
+	// More than this many labels post metric-relabeling will cause the scrape to
+	// fail. 0 means no limit.
+	LabelLimit uint `yaml:"label_limit,omitempty"`
+	// More than this label name length post metric-relabeling will cause the
+	// scrape to fail. 0 means no limit.
+	LabelNameLengthLimit uint `yaml:"label_name_length_limit,omitempty"`
+	// More than this label value length post metric-relabeling will cause the
+	// scrape to fail. 0 means no limit.
+	LabelValueLengthLimit uint `yaml:"label_value_length_limit,omitempty"`
 }

 // SetDirectory joins any relative file paths with dir.

@@ -478,19 +497,19 @@ type ScrapeConfig struct {
 	// scrape to fail. 0 means no limit.
 	BodySizeLimit units.Base2Bytes `yaml:"body_size_limit,omitempty"`
 	// More than this many samples post metric-relabeling will cause the scrape to
-	// fail.
+	// fail. 0 means no limit.
 	SampleLimit uint `yaml:"sample_limit,omitempty"`
 	// More than this many targets after the target relabeling will cause the
-	// scrapes to fail.
+	// scrapes to fail. 0 means no limit.
 	TargetLimit uint `yaml:"target_limit,omitempty"`
 	// More than this many labels post metric-relabeling will cause the scrape to
-	// fail.
+	// fail. 0 means no limit.
 	LabelLimit uint `yaml:"label_limit,omitempty"`
 	// More than this label name length post metric-relabeling will cause the
-	// scrape to fail.
+	// scrape to fail. 0 means no limit.
 	LabelNameLengthLimit uint `yaml:"label_name_length_limit,omitempty"`
 	// More than this label value length post metric-relabeling will cause the
-	// scrape to fail.
+	// scrape to fail. 0 means no limit.
 	LabelValueLengthLimit uint `yaml:"label_value_length_limit,omitempty"`
 	// More than this many buckets in a native histogram will cause the scrape to
 	// fail.

@@ -552,25 +571,44 @@ func (c *ScrapeConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
 	return nil
 }

-func (c *ScrapeConfig) Validate(defaultInterval, defaultTimeout model.Duration) error {
+func (c *ScrapeConfig) Validate(globalConfig GlobalConfig) error {
 	if c == nil {
 		return errors.New("empty or null scrape config section")
 	}
 	// First set the correct scrape interval, then check that the timeout
 	// (inferred or explicit) is not greater than that.
 	if c.ScrapeInterval == 0 {
-		c.ScrapeInterval = defaultInterval
+		c.ScrapeInterval = globalConfig.ScrapeInterval
 	}
 	if c.ScrapeTimeout > c.ScrapeInterval {
 		return fmt.Errorf("scrape timeout greater than scrape interval for scrape config with job name %q", c.JobName)
 	}
 	if c.ScrapeTimeout == 0 {
-		if defaultTimeout > c.ScrapeInterval {
+		if globalConfig.ScrapeTimeout > c.ScrapeInterval {
 			c.ScrapeTimeout = c.ScrapeInterval
 		} else {
-			c.ScrapeTimeout = defaultTimeout
+			c.ScrapeTimeout = globalConfig.ScrapeTimeout
 		}
 	}
+	if c.BodySizeLimit == 0 {
+		c.BodySizeLimit = globalConfig.BodySizeLimit
+	}
+	if c.SampleLimit == 0 {
+		c.SampleLimit = globalConfig.SampleLimit
+	}
+	if c.TargetLimit == 0 {
+		c.TargetLimit = globalConfig.TargetLimit
+	}
+	if c.LabelLimit == 0 {
+		c.LabelLimit = globalConfig.LabelLimit
+	}
+	if c.LabelNameLengthLimit == 0 {
+		c.LabelNameLengthLimit = globalConfig.LabelNameLengthLimit
+	}
+	if c.LabelValueLengthLimit == 0 {
+		c.LabelValueLengthLimit = globalConfig.LabelValueLengthLimit
+	}

 	return nil
 }

@@ -870,6 +908,7 @@ type RemoteWriteConfig struct {
 	QueueConfig    QueueConfig            `yaml:"queue_config,omitempty"`
 	MetadataConfig MetadataConfig         `yaml:"metadata_config,omitempty"`
 	SigV4Config    *sigv4.SigV4Config     `yaml:"sigv4,omitempty"`
+	AzureADConfig  *azuread.AzureADConfig `yaml:"azuread,omitempty"`
 }

 // SetDirectory joins any relative file paths with dir.
@@ -906,8 +945,12 @@ func (c *RemoteWriteConfig) UnmarshalYAML(unmarshal func(interface{}) error) err
 	httpClientConfigAuthEnabled := c.HTTPClientConfig.BasicAuth != nil ||
 		c.HTTPClientConfig.Authorization != nil || c.HTTPClientConfig.OAuth2 != nil

-	if httpClientConfigAuthEnabled && c.SigV4Config != nil {
-		return fmt.Errorf("at most one of basic_auth, authorization, oauth2, & sigv4 must be configured")
+	if httpClientConfigAuthEnabled && (c.SigV4Config != nil || c.AzureADConfig != nil) {
+		return fmt.Errorf("at most one of basic_auth, authorization, oauth2, sigv4, & azuread must be configured")
+	}
+
+	if c.SigV4Config != nil && c.AzureADConfig != nil {
+		return fmt.Errorf("at most one of basic_auth, authorization, oauth2, sigv4, & azuread must be configured")
 	}

 	return nil
@@ -928,7 +971,7 @@ func validateHeadersForTracing(headers map[string]string) error {
 func validateHeaders(headers map[string]string) error {
 	for header := range headers {
 		if strings.ToLower(header) == "authorization" {
-			return errors.New("authorization header must be changed via the basic_auth, authorization, oauth2, or sigv4 parameter")
+			return errors.New("authorization header must be changed via the basic_auth, authorization, oauth2, sigv4, or azuread parameter")
 		}
 		if _, ok := reservedHeaders[strings.ToLower(header)]; ok {
 			return fmt.Errorf("%s is a reserved header. It must not be changed", header)
@@ -68,6 +68,15 @@ func mustParseURL(u string) *config.URL {
 	return &config.URL{URL: parsed}
 }

+const (
+	globBodySizeLimit         = 15 * units.MiB
+	globSampleLimit           = 1500
+	globTargetLimit           = 30
+	globLabelLimit            = 30
+	globLabelNameLengthLimit  = 200
+	globLabelValueLengthLimit = 200
+)
+
 var expectedConf = &Config{
 	GlobalConfig: GlobalConfig{
 		ScrapeInterval: model.Duration(15 * time.Second),

@@ -76,6 +85,13 @@ var expectedConf = &Config{
 		QueryLogFile: "",

 		ExternalLabels: labels.FromStrings("foo", "bar", "monitor", "codelab"),
+
+		BodySizeLimit:         globBodySizeLimit,
+		SampleLimit:           globSampleLimit,
+		TargetLimit:           globTargetLimit,
+		LabelLimit:            globLabelLimit,
+		LabelNameLengthLimit:  globLabelNameLengthLimit,
+		LabelValueLengthLimit: globLabelValueLengthLimit,
 	},

 	RuleFiles: []string{
@@ -165,10 +181,16 @@ var expectedConf = &Config{
 	{
 		JobName: "prometheus",

-		HonorLabels:     true,
-		HonorTimestamps: true,
-		ScrapeInterval:  model.Duration(15 * time.Second),
-		ScrapeTimeout:   DefaultGlobalConfig.ScrapeTimeout,
+		HonorLabels:           true,
+		HonorTimestamps:       true,
+		ScrapeInterval:        model.Duration(15 * time.Second),
+		ScrapeTimeout:         DefaultGlobalConfig.ScrapeTimeout,
+		BodySizeLimit:         globBodySizeLimit,
+		SampleLimit:           globSampleLimit,
+		TargetLimit:           globTargetLimit,
+		LabelLimit:            globLabelLimit,
+		LabelNameLengthLimit:  globLabelNameLengthLimit,
+		LabelValueLengthLimit: globLabelValueLengthLimit,

 		MetricsPath: DefaultScrapeConfig.MetricsPath,
 		Scheme:      DefaultScrapeConfig.Scheme,

@@ -261,11 +283,15 @@ var expectedConf = &Config{
 	{
 		JobName: "service-x",

-		HonorTimestamps: true,
-		ScrapeInterval:  model.Duration(50 * time.Second),
-		ScrapeTimeout:   model.Duration(5 * time.Second),
-		BodySizeLimit:   10 * units.MiB,
-		SampleLimit:     1000,
+		HonorTimestamps:       true,
+		ScrapeInterval:        model.Duration(50 * time.Second),
+		ScrapeTimeout:         model.Duration(5 * time.Second),
+		BodySizeLimit:         10 * units.MiB,
+		SampleLimit:           1000,
+		TargetLimit:           35,
+		LabelLimit:            35,
+		LabelNameLengthLimit:  210,
+		LabelValueLengthLimit: 210,

 		HTTPClientConfig: config.HTTPClientConfig{
 			BasicAuth: &config.BasicAuth{

@@ -352,9 +378,15 @@ var expectedConf = &Config{
 	{
 		JobName: "service-y",

-		HonorTimestamps: true,
-		ScrapeInterval:  model.Duration(15 * time.Second),
-		ScrapeTimeout:   DefaultGlobalConfig.ScrapeTimeout,
+		HonorTimestamps:       true,
+		ScrapeInterval:        model.Duration(15 * time.Second),
+		ScrapeTimeout:         DefaultGlobalConfig.ScrapeTimeout,
+		BodySizeLimit:         globBodySizeLimit,
+		SampleLimit:           globSampleLimit,
+		TargetLimit:           globTargetLimit,
+		LabelLimit:            globLabelLimit,
+		LabelNameLengthLimit:  globLabelNameLengthLimit,
+		LabelValueLengthLimit: globLabelValueLengthLimit,

 		MetricsPath: DefaultScrapeConfig.MetricsPath,
 		Scheme:      DefaultScrapeConfig.Scheme,

@@ -399,9 +431,15 @@ var expectedConf = &Config{
 	{
 		JobName: "service-z",

-		HonorTimestamps: true,
-		ScrapeInterval:  model.Duration(15 * time.Second),
-		ScrapeTimeout:   model.Duration(10 * time.Second),
+		HonorTimestamps:       true,
+		ScrapeInterval:        model.Duration(15 * time.Second),
+		ScrapeTimeout:         model.Duration(10 * time.Second),
+		BodySizeLimit:         globBodySizeLimit,
+		SampleLimit:           globSampleLimit,
+		TargetLimit:           globTargetLimit,
+		LabelLimit:            globLabelLimit,
+		LabelNameLengthLimit:  globLabelNameLengthLimit,
+		LabelValueLengthLimit: globLabelValueLengthLimit,

 		MetricsPath: "/metrics",
 		Scheme:      "http",

@@ -424,9 +462,15 @@ var expectedConf = &Config{
 	{
 		JobName: "service-kubernetes",

-		HonorTimestamps: true,
-		ScrapeInterval:  model.Duration(15 * time.Second),
-		ScrapeTimeout:   DefaultGlobalConfig.ScrapeTimeout,
+		HonorTimestamps:       true,
+		ScrapeInterval:        model.Duration(15 * time.Second),
+		ScrapeTimeout:         DefaultGlobalConfig.ScrapeTimeout,
+		BodySizeLimit:         globBodySizeLimit,
+		SampleLimit:           globSampleLimit,
+		TargetLimit:           globTargetLimit,
+		LabelLimit:            globLabelLimit,
+		LabelNameLengthLimit:  globLabelNameLengthLimit,
+		LabelValueLengthLimit: globLabelValueLengthLimit,

 		MetricsPath: DefaultScrapeConfig.MetricsPath,
 		Scheme:      DefaultScrapeConfig.Scheme,

@@ -455,9 +499,15 @@ var expectedConf = &Config{
 	{
 		JobName: "service-kubernetes-namespaces",

-		HonorTimestamps: true,
-		ScrapeInterval:  model.Duration(15 * time.Second),
-		ScrapeTimeout:   DefaultGlobalConfig.ScrapeTimeout,
+		HonorTimestamps:       true,
+		ScrapeInterval:        model.Duration(15 * time.Second),
+		ScrapeTimeout:         DefaultGlobalConfig.ScrapeTimeout,
+		BodySizeLimit:         globBodySizeLimit,
+		SampleLimit:           globSampleLimit,
+		TargetLimit:           globTargetLimit,
+		LabelLimit:            globLabelLimit,
+		LabelNameLengthLimit:  globLabelNameLengthLimit,
+		LabelValueLengthLimit: globLabelValueLengthLimit,

 		MetricsPath: DefaultScrapeConfig.MetricsPath,
 		Scheme:      DefaultScrapeConfig.Scheme,

@@ -486,9 +536,15 @@ var expectedConf = &Config{
 	{
 		JobName: "service-kuma",

-		HonorTimestamps: true,
-		ScrapeInterval:  model.Duration(15 * time.Second),
-		ScrapeTimeout:   DefaultGlobalConfig.ScrapeTimeout,
+		HonorTimestamps:       true,
+		ScrapeInterval:        model.Duration(15 * time.Second),
+		ScrapeTimeout:         DefaultGlobalConfig.ScrapeTimeout,
+		BodySizeLimit:         globBodySizeLimit,
+		SampleLimit:           globSampleLimit,
+		TargetLimit:           globTargetLimit,
+		LabelLimit:            globLabelLimit,
+		LabelNameLengthLimit:  globLabelNameLengthLimit,
+		LabelValueLengthLimit: globLabelValueLengthLimit,

 		MetricsPath: DefaultScrapeConfig.MetricsPath,
 		Scheme:      DefaultScrapeConfig.Scheme,

@@ -506,9 +562,15 @@ var expectedConf = &Config{
 	{
 		JobName: "service-marathon",

-		HonorTimestamps: true,
-		ScrapeInterval:  model.Duration(15 * time.Second),
-		ScrapeTimeout:   DefaultGlobalConfig.ScrapeTimeout,
+		HonorTimestamps:       true,
+		ScrapeInterval:        model.Duration(15 * time.Second),
+		ScrapeTimeout:         DefaultGlobalConfig.ScrapeTimeout,
+		BodySizeLimit:         globBodySizeLimit,
+		SampleLimit:           globSampleLimit,
+		TargetLimit:           globTargetLimit,
+		LabelLimit:            globLabelLimit,
+		LabelNameLengthLimit:  globLabelNameLengthLimit,
+		LabelValueLengthLimit: globLabelValueLengthLimit,

 		MetricsPath: DefaultScrapeConfig.MetricsPath,
 		Scheme:      DefaultScrapeConfig.Scheme,

@@ -535,9 +597,15 @@ var expectedConf = &Config{
 	{
 		JobName: "service-nomad",

-		HonorTimestamps: true,
-		ScrapeInterval:  model.Duration(15 * time.Second),
-		ScrapeTimeout:   DefaultGlobalConfig.ScrapeTimeout,
+		HonorTimestamps:       true,
+		ScrapeInterval:        model.Duration(15 * time.Second),
+		ScrapeTimeout:         DefaultGlobalConfig.ScrapeTimeout,
+		BodySizeLimit:         globBodySizeLimit,
+		SampleLimit:           globSampleLimit,
+		TargetLimit:           globTargetLimit,
+		LabelLimit:            globLabelLimit,
+		LabelNameLengthLimit:  globLabelNameLengthLimit,
+		LabelValueLengthLimit: globLabelValueLengthLimit,

 		MetricsPath: DefaultScrapeConfig.MetricsPath,
 		Scheme:      DefaultScrapeConfig.Scheme,

@@ -561,9 +629,15 @@ var expectedConf = &Config{
 	{
 		JobName: "service-ec2",

-		HonorTimestamps: true,
-		ScrapeInterval:  model.Duration(15 * time.Second),
-		ScrapeTimeout:   DefaultGlobalConfig.ScrapeTimeout,
+		HonorTimestamps:       true,
+		ScrapeInterval:        model.Duration(15 * time.Second),
+		ScrapeTimeout:         DefaultGlobalConfig.ScrapeTimeout,
+		BodySizeLimit:         globBodySizeLimit,
+		SampleLimit:           globSampleLimit,
+		TargetLimit:           globTargetLimit,
+		LabelLimit:            globLabelLimit,
+		LabelNameLengthLimit:  globLabelNameLengthLimit,
+		LabelValueLengthLimit: globLabelValueLengthLimit,

 		MetricsPath: DefaultScrapeConfig.MetricsPath,
 		Scheme:      DefaultScrapeConfig.Scheme,

@@ -594,9 +668,15 @@ var expectedConf = &Config{
 	{
 		JobName: "service-lightsail",

-		HonorTimestamps: true,
-		ScrapeInterval:  model.Duration(15 * time.Second),
-		ScrapeTimeout:   DefaultGlobalConfig.ScrapeTimeout,
+		HonorTimestamps:       true,
+		ScrapeInterval:        model.Duration(15 * time.Second),
+		ScrapeTimeout:         DefaultGlobalConfig.ScrapeTimeout,
+		BodySizeLimit:         globBodySizeLimit,
+		SampleLimit:           globSampleLimit,
+		TargetLimit:           globTargetLimit,
+		LabelLimit:            globLabelLimit,
+		LabelNameLengthLimit:  globLabelNameLengthLimit,
+		LabelValueLengthLimit: globLabelValueLengthLimit,

 		MetricsPath: DefaultScrapeConfig.MetricsPath,
 		Scheme:      DefaultScrapeConfig.Scheme,

@@ -617,9 +697,15 @@ var expectedConf = &Config{
 	{
 		JobName: "service-azure",

-		HonorTimestamps: true,
-		ScrapeInterval:  model.Duration(15 * time.Second),
-		ScrapeTimeout:   DefaultGlobalConfig.ScrapeTimeout,
+		HonorTimestamps:       true,
+		ScrapeInterval:        model.Duration(15 * time.Second),
+		ScrapeTimeout:         DefaultGlobalConfig.ScrapeTimeout,
+		BodySizeLimit:         globBodySizeLimit,
+		SampleLimit:           globSampleLimit,
+		TargetLimit:           globTargetLimit,
+		LabelLimit:            globLabelLimit,
+		LabelNameLengthLimit:  globLabelNameLengthLimit,
+		LabelValueLengthLimit: globLabelValueLengthLimit,

 		MetricsPath: DefaultScrapeConfig.MetricsPath,
 		Scheme:      DefaultScrapeConfig.Scheme,

@@ -643,9 +729,15 @@ var expectedConf = &Config{
 	{
 		JobName: "service-nerve",

-		HonorTimestamps: true,
-		ScrapeInterval:  model.Duration(15 * time.Second),
-		ScrapeTimeout:   DefaultGlobalConfig.ScrapeTimeout,
+		HonorTimestamps:       true,
+		ScrapeInterval:        model.Duration(15 * time.Second),
+		ScrapeTimeout:         DefaultGlobalConfig.ScrapeTimeout,
+		BodySizeLimit:         globBodySizeLimit,
+		SampleLimit:           globSampleLimit,
+		TargetLimit:           globTargetLimit,
+		LabelLimit:            globLabelLimit,
+		LabelNameLengthLimit:  globLabelNameLengthLimit,
+		LabelValueLengthLimit: globLabelValueLengthLimit,

 		MetricsPath: DefaultScrapeConfig.MetricsPath,
 		Scheme:      DefaultScrapeConfig.Scheme,

@@ -662,9 +754,15 @@ var expectedConf = &Config{
 	{
 		JobName: "0123service-xxx",

-		HonorTimestamps: true,
-		ScrapeInterval:  model.Duration(15 * time.Second),
-		ScrapeTimeout:   DefaultGlobalConfig.ScrapeTimeout,
+		HonorTimestamps:       true,
+		ScrapeInterval:        model.Duration(15 * time.Second),
+		ScrapeTimeout:         DefaultGlobalConfig.ScrapeTimeout,
+		BodySizeLimit:         globBodySizeLimit,
+		SampleLimit:           globSampleLimit,
+		TargetLimit:           globTargetLimit,
+		LabelLimit:            globLabelLimit,
+		LabelNameLengthLimit:  globLabelNameLengthLimit,
+		LabelValueLengthLimit: globLabelValueLengthLimit,

 		MetricsPath: DefaultScrapeConfig.MetricsPath,
 		Scheme:      DefaultScrapeConfig.Scheme,

@@ -684,9 +782,15 @@ var expectedConf = &Config{
 	{
 		JobName: "badfederation",

-		HonorTimestamps: false,
-		ScrapeInterval:  model.Duration(15 * time.Second),
-		ScrapeTimeout:   DefaultGlobalConfig.ScrapeTimeout,
+		HonorTimestamps:       false,
+		ScrapeInterval:        model.Duration(15 * time.Second),
+		ScrapeTimeout:         DefaultGlobalConfig.ScrapeTimeout,
+		BodySizeLimit:         globBodySizeLimit,
+		SampleLimit:           globSampleLimit,
+		TargetLimit:           globTargetLimit,
+		LabelLimit:            globLabelLimit,
+		LabelNameLengthLimit:  globLabelNameLengthLimit,
+		LabelValueLengthLimit: globLabelValueLengthLimit,

 		MetricsPath: "/federate",
 		Scheme:      DefaultScrapeConfig.Scheme,

@@ -706,9 +810,15 @@ var expectedConf = &Config{
 	{
 		JobName: "測試",

-		HonorTimestamps: true,
-		ScrapeInterval:  model.Duration(15 * time.Second),
-		ScrapeTimeout:   DefaultGlobalConfig.ScrapeTimeout,
+		HonorTimestamps:       true,
+		ScrapeInterval:        model.Duration(15 * time.Second),
+		ScrapeTimeout:         DefaultGlobalConfig.ScrapeTimeout,
+		BodySizeLimit:         globBodySizeLimit,
+		SampleLimit:           globSampleLimit,
+		TargetLimit:           globTargetLimit,
+		LabelLimit:            globLabelLimit,
+		LabelNameLengthLimit:  globLabelNameLengthLimit,
+		LabelValueLengthLimit: globLabelValueLengthLimit,

 		MetricsPath: DefaultScrapeConfig.MetricsPath,
 		Scheme:      DefaultScrapeConfig.Scheme,

@@ -728,9 +838,15 @@ var expectedConf = &Config{
 	{
 		JobName: "httpsd",

-		HonorTimestamps: true,
-		ScrapeInterval:  model.Duration(15 * time.Second),
-		ScrapeTimeout:   DefaultGlobalConfig.ScrapeTimeout,
+		HonorTimestamps:       true,
+		ScrapeInterval:        model.Duration(15 * time.Second),
+		ScrapeTimeout:         DefaultGlobalConfig.ScrapeTimeout,
+		BodySizeLimit:         globBodySizeLimit,
+		SampleLimit:           globSampleLimit,
+		TargetLimit:           globTargetLimit,
+		LabelLimit:            globLabelLimit,
+		LabelNameLengthLimit:  globLabelNameLengthLimit,
+		LabelValueLengthLimit: globLabelValueLengthLimit,

 		MetricsPath: DefaultScrapeConfig.MetricsPath,
 		Scheme:      DefaultScrapeConfig.Scheme,

@@ -747,9 +863,15 @@ var expectedConf = &Config{
 	{
 		JobName: "service-triton",

-		HonorTimestamps: true,
-		ScrapeInterval:  model.Duration(15 * time.Second),
-		ScrapeTimeout:   DefaultGlobalConfig.ScrapeTimeout,
+		HonorTimestamps:       true,
+		ScrapeInterval:        model.Duration(15 * time.Second),
+		ScrapeTimeout:         DefaultGlobalConfig.ScrapeTimeout,
+		BodySizeLimit:         globBodySizeLimit,
+		SampleLimit:           globSampleLimit,
+		TargetLimit:           globTargetLimit,
+		LabelLimit:            globLabelLimit,
+		LabelNameLengthLimit:  globLabelNameLengthLimit,
+		LabelValueLengthLimit: globLabelValueLengthLimit,

 		MetricsPath: DefaultScrapeConfig.MetricsPath,
 		Scheme:      DefaultScrapeConfig.Scheme,

@@ -774,9 +896,15 @@ var expectedConf = &Config{
 	{
 		JobName: "digitalocean-droplets",

-		HonorTimestamps: true,
-		ScrapeInterval:  model.Duration(15 * time.Second),
-		ScrapeTimeout:   DefaultGlobalConfig.ScrapeTimeout,
+		HonorTimestamps:       true,
+		ScrapeInterval:        model.Duration(15 * time.Second),
+		ScrapeTimeout:         DefaultGlobalConfig.ScrapeTimeout,
+		BodySizeLimit:         globBodySizeLimit,
+		SampleLimit:           globSampleLimit,
+		TargetLimit:           globTargetLimit,
+		LabelLimit:            globLabelLimit,
+		LabelNameLengthLimit:  globLabelNameLengthLimit,
+		LabelValueLengthLimit: globLabelValueLengthLimit,

 		MetricsPath: DefaultScrapeConfig.MetricsPath,
 		Scheme:      DefaultScrapeConfig.Scheme,

@@ -800,9 +928,15 @@ var expectedConf = &Config{
 	{
 		JobName: "docker",

-		HonorTimestamps: true,
-		ScrapeInterval:  model.Duration(15 * time.Second),
-		ScrapeTimeout:   DefaultGlobalConfig.ScrapeTimeout,
+		HonorTimestamps:       true,
+		ScrapeInterval:        model.Duration(15 * time.Second),
+		ScrapeTimeout:         DefaultGlobalConfig.ScrapeTimeout,
+		BodySizeLimit:         globBodySizeLimit,
+		SampleLimit:           globSampleLimit,
+		TargetLimit:           globTargetLimit,
+		LabelLimit:            globLabelLimit,
+		LabelNameLengthLimit:  globLabelNameLengthLimit,
+		LabelValueLengthLimit: globLabelValueLengthLimit,

 		MetricsPath: DefaultScrapeConfig.MetricsPath,
 		Scheme:      DefaultScrapeConfig.Scheme,

@@ -822,9 +956,15 @@ var expectedConf = &Config{
 	{
 		JobName: "dockerswarm",

-		HonorTimestamps: true,
-		ScrapeInterval:  model.Duration(15 * time.Second),
-		ScrapeTimeout:   DefaultGlobalConfig.ScrapeTimeout,
+		HonorTimestamps:       true,
+		ScrapeInterval:        model.Duration(15 * time.Second),
+		ScrapeTimeout:         DefaultGlobalConfig.ScrapeTimeout,
+		BodySizeLimit:         globBodySizeLimit,
+		SampleLimit:           globSampleLimit,
+		TargetLimit:           globTargetLimit,
+		LabelLimit:            globLabelLimit,
+		LabelNameLengthLimit:  globLabelNameLengthLimit,
+		LabelValueLengthLimit: globLabelValueLengthLimit,

 		MetricsPath: DefaultScrapeConfig.MetricsPath,
 		Scheme:      DefaultScrapeConfig.Scheme,

@@ -844,9 +984,15 @@ var expectedConf = &Config{
 	{
 		JobName: "service-openstack",

-		HonorTimestamps: true,
-		ScrapeInterval:  model.Duration(15 * time.Second),
-		ScrapeTimeout:   DefaultGlobalConfig.ScrapeTimeout,
+		HonorTimestamps:       true,
+		ScrapeInterval:        model.Duration(15 * time.Second),
+		ScrapeTimeout:         DefaultGlobalConfig.ScrapeTimeout,
+		BodySizeLimit:         globBodySizeLimit,
+		SampleLimit:           globSampleLimit,
+		TargetLimit:           globTargetLimit,
+		LabelLimit:            globLabelLimit,
+		LabelNameLengthLimit:  globLabelNameLengthLimit,
+		LabelValueLengthLimit: globLabelValueLengthLimit,

 		MetricsPath: DefaultScrapeConfig.MetricsPath,
 		Scheme:      DefaultScrapeConfig.Scheme,

@@ -870,9 +1016,15 @@ var expectedConf = &Config{
 	{
 		JobName: "service-puppetdb",

-		HonorTimestamps: true,
-		ScrapeInterval:  model.Duration(15 * time.Second),
-		ScrapeTimeout:   DefaultGlobalConfig.ScrapeTimeout,
+		HonorTimestamps:       true,
+		ScrapeInterval:        model.Duration(15 * time.Second),
+		ScrapeTimeout:         DefaultGlobalConfig.ScrapeTimeout,
+		BodySizeLimit:         globBodySizeLimit,
+		SampleLimit:           globSampleLimit,
+		TargetLimit:           globTargetLimit,
+		LabelLimit:            globLabelLimit,
+		LabelNameLengthLimit:  globLabelNameLengthLimit,
+		LabelValueLengthLimit: globLabelValueLengthLimit,

 		MetricsPath: DefaultScrapeConfig.MetricsPath,
 		Scheme:      DefaultScrapeConfig.Scheme,

@@ -898,10 +1050,16 @@ var expectedConf = &Config{
 		},
 	},
 	{
-		JobName:         "hetzner",
-		HonorTimestamps: true,
-		ScrapeInterval:  model.Duration(15 * time.Second),
-		ScrapeTimeout:   DefaultGlobalConfig.ScrapeTimeout,
+		JobName:               "hetzner",
+		HonorTimestamps:       true,
+		ScrapeInterval:        model.Duration(15 * time.Second),
+		ScrapeTimeout:         DefaultGlobalConfig.ScrapeTimeout,
+		BodySizeLimit:         globBodySizeLimit,
+		SampleLimit:           globSampleLimit,
+		TargetLimit:           globTargetLimit,
+		LabelLimit:            globLabelLimit,
+		LabelNameLengthLimit:  globLabelNameLengthLimit,
+		LabelValueLengthLimit: globLabelValueLengthLimit,

 		MetricsPath: DefaultScrapeConfig.MetricsPath,
 		Scheme:      DefaultScrapeConfig.Scheme,

@@ -947,9 +1105,15 @@ var expectedConf = &Config{
 	{
 		JobName: "service-eureka",

-		HonorTimestamps: true,
-		ScrapeInterval:  model.Duration(15 * time.Second),
-		ScrapeTimeout:   DefaultGlobalConfig.ScrapeTimeout,
+		HonorTimestamps:       true,
+		ScrapeInterval:        model.Duration(15 * time.Second),
+		ScrapeTimeout:         DefaultGlobalConfig.ScrapeTimeout,
+		BodySizeLimit:         globBodySizeLimit,
+		SampleLimit:           globSampleLimit,
+		TargetLimit:           globTargetLimit,
+		LabelLimit:            globLabelLimit,
+		LabelNameLengthLimit:  globLabelNameLengthLimit,
+		LabelValueLengthLimit: globLabelValueLengthLimit,

 		MetricsPath: DefaultScrapeConfig.MetricsPath,
 		Scheme:      DefaultScrapeConfig.Scheme,

@@ -966,9 +1130,16 @@ var expectedConf = &Config{
 	{
 		JobName: "ovhcloud",

-		HonorTimestamps: true,
-		ScrapeInterval:  model.Duration(15 * time.Second),
-		ScrapeTimeout:   DefaultGlobalConfig.ScrapeTimeout,
+		HonorTimestamps:       true,
+		ScrapeInterval:        model.Duration(15 * time.Second),
+		ScrapeTimeout:         DefaultGlobalConfig.ScrapeTimeout,
+		BodySizeLimit:         globBodySizeLimit,
+		SampleLimit:           globSampleLimit,
+		TargetLimit:           globTargetLimit,
+		LabelLimit:            globLabelLimit,
+		LabelNameLengthLimit:  globLabelNameLengthLimit,
+		LabelValueLengthLimit: globLabelValueLengthLimit,

 		HTTPClientConfig: config.DefaultHTTPClientConfig,
 		MetricsPath:      DefaultScrapeConfig.MetricsPath,
 		Scheme:           DefaultScrapeConfig.Scheme,

@@ -995,9 +1166,16 @@ var expectedConf = &Config{
 	{
 		JobName: "scaleway",

-		HonorTimestamps: true,
-		ScrapeInterval:  model.Duration(15 * time.Second),
-		ScrapeTimeout:   DefaultGlobalConfig.ScrapeTimeout,
+		HonorTimestamps:       true,
+		ScrapeInterval:        model.Duration(15 * time.Second),
+		ScrapeTimeout:         DefaultGlobalConfig.ScrapeTimeout,
+		BodySizeLimit:         globBodySizeLimit,
+		SampleLimit:           globSampleLimit,
+		TargetLimit:           globTargetLimit,
+		LabelLimit:            globLabelLimit,
+		LabelNameLengthLimit:  globLabelNameLengthLimit,
+		LabelValueLengthLimit: globLabelValueLengthLimit,

 		HTTPClientConfig: config.DefaultHTTPClientConfig,
 		MetricsPath:      DefaultScrapeConfig.MetricsPath,
 		Scheme:           DefaultScrapeConfig.Scheme,

@@ -1030,9 +1208,15 @@ var expectedConf = &Config{
 	{
 		JobName: "linode-instances",

-		HonorTimestamps: true,
-		ScrapeInterval:  model.Duration(15 * time.Second),
-		ScrapeTimeout:   DefaultGlobalConfig.ScrapeTimeout,
+		HonorTimestamps:       true,
+		ScrapeInterval:        model.Duration(15 * time.Second),
+		ScrapeTimeout:         DefaultGlobalConfig.ScrapeTimeout,
+		BodySizeLimit:         globBodySizeLimit,
+		SampleLimit:           globSampleLimit,
+		TargetLimit:           globTargetLimit,
+		LabelLimit:            globLabelLimit,
+		LabelNameLengthLimit:  globLabelNameLengthLimit,
+		LabelValueLengthLimit: globLabelValueLengthLimit,

 		MetricsPath: DefaultScrapeConfig.MetricsPath,
 		Scheme:      DefaultScrapeConfig.Scheme,

@@ -1057,9 +1241,16 @@ var expectedConf = &Config{
 	{
 		JobName: "uyuni",

-		HonorTimestamps: true,
-		ScrapeInterval:  model.Duration(15 * time.Second),
-		ScrapeTimeout:   DefaultGlobalConfig.ScrapeTimeout,
+		HonorTimestamps:       true,
+		ScrapeInterval:        model.Duration(15 * time.Second),
+		ScrapeTimeout:         DefaultGlobalConfig.ScrapeTimeout,
+		BodySizeLimit:         globBodySizeLimit,
+		SampleLimit:           globSampleLimit,
+		TargetLimit:           globTargetLimit,
+		LabelLimit:            globLabelLimit,
+		LabelNameLengthLimit:  globLabelNameLengthLimit,
+		LabelValueLengthLimit: globLabelValueLengthLimit,

 		HTTPClientConfig: config.DefaultHTTPClientConfig,
 		MetricsPath:      DefaultScrapeConfig.MetricsPath,
 		Scheme:           DefaultScrapeConfig.Scheme,

@@ -1076,10 +1267,16 @@ var expectedConf = &Config{
 		},
 	},
 	{
-		JobName:         "ionos",
-		HonorTimestamps: true,
-		ScrapeInterval:  model.Duration(15 * time.Second),
-		ScrapeTimeout:   DefaultGlobalConfig.ScrapeTimeout,
+		JobName:               "ionos",
+		HonorTimestamps:       true,
+		ScrapeInterval:        model.Duration(15 * time.Second),
+		ScrapeTimeout:         DefaultGlobalConfig.ScrapeTimeout,
+		BodySizeLimit:         globBodySizeLimit,
+		SampleLimit:           globSampleLimit,
+		TargetLimit:           globTargetLimit,
+		LabelLimit:            globLabelLimit,
+		LabelNameLengthLimit:  globLabelNameLengthLimit,
+		LabelValueLengthLimit: globLabelValueLengthLimit,

 		MetricsPath: DefaultScrapeConfig.MetricsPath,
 		Scheme:      DefaultScrapeConfig.Scheme,

@@ -1101,9 +1298,15 @@ var expectedConf = &Config{
 	{
 		JobName: "vultr",

-		HonorTimestamps: true,
-		ScrapeInterval:  model.Duration(15 * time.Second),
-		ScrapeTimeout:   DefaultGlobalConfig.ScrapeTimeout,
+		HonorTimestamps:       true,
+		ScrapeInterval:        model.Duration(15 * time.Second),
+		ScrapeTimeout:         DefaultGlobalConfig.ScrapeTimeout,
+		BodySizeLimit:         globBodySizeLimit,
+		SampleLimit:           globSampleLimit,
+		TargetLimit:           globTargetLimit,
+		LabelLimit:            globLabelLimit,
+		LabelNameLengthLimit:  globLabelNameLengthLimit,
+		LabelValueLengthLimit: globLabelValueLengthLimit,

 		MetricsPath: DefaultScrapeConfig.MetricsPath,
 		Scheme:      DefaultScrapeConfig.Scheme,
@@ -1524,7 +1727,7 @@ var expectedErrors = []struct {
 	},
 	{
 		filename: "remote_write_authorization_header.bad.yml",
-		errMsg:   `authorization header must be changed via the basic_auth, authorization, oauth2, or sigv4 parameter`,
+		errMsg:   `authorization header must be changed via the basic_auth, authorization, oauth2, sigv4, or azuread parameter`,
 	},
 	{
 		filename: "remote_write_url_missing.bad.yml",
config/testdata/conf.good.yml (11 lines changed)
@@ -2,6 +2,12 @@
 global:
   scrape_interval: 15s
   evaluation_interval: 30s
+  body_size_limit: 15MB
+  sample_limit: 1500
+  target_limit: 30
+  label_limit: 30
+  label_name_length_limit: 200
+  label_value_length_limit: 200
   # scrape_timeout is set to the global default (10s).

   external_labels:

@@ -111,6 +117,11 @@ scrape_configs:

     body_size_limit: 10MB
     sample_limit: 1000
+    target_limit: 35
+    label_limit: 35
+    label_name_length_limit: 210
+    label_value_length_limit: 210
+

     metrics_path: /my_path
     scheme: https
@@ -226,8 +226,8 @@ func (d *Discovery) watchFiles() {
 		panic("no watcher configured")
 	}
 	for _, p := range d.paths {
-		if idx := strings.LastIndex(p, "/"); idx > -1 {
-			p = p[:idx]
+		if dir, _ := filepath.Split(p); dir != "" {
+			p = dir
 		} else {
 			p = "./"
 		}
@@ -27,6 +27,7 @@ Tooling for the Prometheus monitoring system.
 | check | Check the resources for validity. |
 | query | Run query against a Prometheus server. |
 | debug | Fetch debug information. |
+| push | Push to a Prometheus server. |
 | test | Unit testing. |
 | tsdb | Run tsdb commands. |

@@ -180,9 +181,9 @@ Check if the rule files are valid or not.

 ###### Arguments

-| Argument | Description | Required |
-| --- | --- | --- |
-| rule-files | The rule files to check. | Yes |
+| Argument | Description |
+| --- | --- |
+| rule-files | The rule files to check, default is read from standard input. |

@@ -372,6 +373,48 @@ Fetch all debug information.

+### `promtool push`
+
+Push to a Prometheus server.
+
+#### Flags
+
+| Flag | Description |
+| --- | --- |
+| <code class="text-nowrap">--http.config.file</code> | HTTP client configuration file for promtool to connect to Prometheus. |
+
+##### `promtool push metrics`
+
+Push metrics to a prometheus remote write (for testing purpose only).
+
+###### Flags
+
+| Flag | Description | Default |
+| --- | --- | --- |
+| <code class="text-nowrap">--label</code> | Label to attach to metrics. Can be specified multiple times. | `job=promtool` |
+| <code class="text-nowrap">--timeout</code> | The time to wait for pushing metrics. | `30s` |
+| <code class="text-nowrap">--header</code> | Prometheus remote write header. |  |
+
+###### Arguments
+
+| Argument | Description | Required |
+| --- | --- | --- |
+| remote-write-url | Prometheus remote write url to push metrics. | Yes |
+| metric-files | The metric files to push, default is read from standard input. |  |
+
 ### `promtool test`

 Unit testing.
@@ -32,7 +32,11 @@ groups:
 ```

 The optional `for` clause causes Prometheus to wait for a certain duration
-between first encountering a new expression output vector element and counting an alert as firing for this element. In this case, Prometheus will check that the alert continues to be active during each evaluation for 10 minutes before firing the alert. Elements that are active, but not firing yet, are in the pending state.
+between first encountering a new expression output vector element and counting
+an alert as firing for this element. In this case, Prometheus will check that
+the alert continues to be active during each evaluation for 10 minutes before
+firing the alert. Elements that are active, but not firing yet, are in the pending state.
+Alerting rules without the `for` clause will become active on the first evaluation.

 The `labels` clause allows specifying a set of additional labels to be attached
 to the alert. Any existing conflicting labels will be overwritten. The label
|
||||
|
|
|
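As a concrete illustration of the `for` behaviour described above (the metric name and threshold are hypothetical), such a rule stays pending for 10 minutes before it fires:

```yaml
groups:
  - name: example
    rules:
      - alert: HighRequestLatency        # hypothetical alert
        expr: job:request_latency_seconds:mean5m{job="myjob"} > 0.5
        for: 10m                         # pending for 10 minutes before firing
        labels:
          severity: page                 # extra label; overwrites a conflicting one
        annotations:
          summary: High request latency
```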
@@ -73,6 +73,39 @@ global:
# Reloading the configuration will reopen the file.
[ query_log_file: <string> ]

# An uncompressed response body larger than this many bytes will cause the
# scrape to fail. 0 means no limit. Example: 100MB.
# This is an experimental feature; this behaviour could
# change or be removed in the future.
[ body_size_limit: <size> | default = 0 ]

# Per-scrape limit on the number of scraped samples that will be accepted.
# If more than this number of samples are present after metric relabeling,
# the entire scrape will be treated as failed. 0 means no limit.
[ sample_limit: <int> | default = 0 ]

# Per-scrape limit on the number of labels that will be accepted for a sample. If
# more than this number of labels are present post metric-relabeling, the
# entire scrape will be treated as failed. 0 means no limit.
[ label_limit: <int> | default = 0 ]

# Per-scrape limit on the length of label names that will be accepted for a sample.
# If a label name is longer than this number post metric-relabeling, the entire
# scrape will be treated as failed. 0 means no limit.
[ label_name_length_limit: <int> | default = 0 ]

# Per-scrape limit on the length of label values that will be accepted for a sample.
# If a label value is longer than this number post metric-relabeling, the
# entire scrape will be treated as failed. 0 means no limit.
[ label_value_length_limit: <int> | default = 0 ]

# Per-scrape-config limit on the number of unique targets that will be
# accepted. If more than this number of targets are present after target
# relabeling, Prometheus will mark the targets as failed without scraping them.
# 0 means no limit. This is an experimental feature; this behaviour could
# change in the future.
[ target_limit: <int> | default = 0 ]

# Rule files specify a list of globs. Rules and alerts are read from
# all matching files.
rule_files:
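A minimal sketch of how these new global limits might be set together (the values are illustrative, not recommendations; any field omitted keeps the default of 0, i.e. unlimited):

```yaml
global:
  scrape_interval: 15s
  body_size_limit: 50MB          # fail scrapes whose uncompressed body exceeds 50MB
  sample_limit: 2000             # fail scrapes yielding more than 2000 samples
  label_limit: 64                # per-sample label count cap
  label_name_length_limit: 128
  label_value_length_limit: 512
  target_limit: 100              # per-scrape-config target cap
```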
@@ -354,6 +387,7 @@ metric_relabel_configs:
# This is an experimental feature; this behaviour could
# change or be removed in the future.
[ body_size_limit: <size> | default = 0 ]

# Per-scrape limit on the number of scraped samples that will be accepted.
# If more than this number of samples are present after metric relabeling,
# the entire scrape will be treated as failed. 0 means no limit.
@@ -394,11 +428,16 @@ Where `<job_name>` must be unique across all scrape configurations.

A `tls_config` allows configuring TLS connections.

```yaml
# CA certificate to validate API server certificate with.
# CA certificate to validate API server certificate with. At most one of ca and ca_file is allowed.
[ ca: <string> ]
[ ca_file: <filename> ]

# Certificate and key files for client cert authentication to the server.
# Certificate and key for client cert authentication to the server.
# At most one of cert and cert_file is allowed.
# At most one of key and key_file is allowed.
[ cert: <string> ]
[ cert_file: <filename> ]
[ key: <secret> ]
[ key_file: <filename> ]

# ServerName extension to indicate the name of the server.
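As a sketch of how these options combine in a scrape job (the job name, host, and certificate paths are hypothetical; the inline `ca`/`cert`/`key` fields added above could replace the `*_file` variants):

```yaml
scrape_configs:
  - job_name: secure-app               # hypothetical job
    scheme: https
    tls_config:
      ca_file: /etc/prometheus/ca.crt
      cert_file: /etc/prometheus/client.crt
      key_file: /etc/prometheus/client.key
      server_name: app.example.com
    static_configs:
      - targets: ["app.example.com:443"]
```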
@@ -2127,7 +2166,7 @@ attach_metadata:

See [this example Prometheus configuration file](/documentation/examples/prometheus-kubernetes.yml)
for a detailed example of configuring Prometheus for Kubernetes.

You may wish to check out the 3rd party [Prometheus Operator](https://github.com/coreos/prometheus-operator),
You may wish to check out the 3rd party [Prometheus Operator](https://github.com/prometheus-operator/prometheus-operator),
which automates the Prometheus setup on top of Kubernetes.

### `<kuma_sd_config>`
@@ -3433,7 +3472,7 @@ authorization:
[ credentials_file: <filename> ]

# Optionally configures AWS's Signature Verification 4 signing process to
# sign requests. Cannot be set at the same time as basic_auth, authorization, or oauth2.
# sign requests. Cannot be set at the same time as basic_auth, authorization, oauth2, or azuread.
# To use the default credentials from the AWS SDK, use `sigv4: {}`.
sigv4:
  # The AWS region. If blank, the region from the default credentials chain
@@ -3452,10 +3491,20 @@ sigv4:
  [ role_arn: <string> ]

# Optional OAuth 2.0 configuration.
# Cannot be used at the same time as basic_auth, authorization, or sigv4.
# Cannot be used at the same time as basic_auth, authorization, sigv4, or azuread.
oauth2:
  [ <oauth2> ]

# Optional AzureAD configuration.
# Cannot be used at the same time as basic_auth, authorization, oauth2, or sigv4.
azuread:
  # The Azure Cloud. Options are 'AzurePublic', 'AzureChina', or 'AzureGovernment'.
  [ cloud: <string> | default = AzurePublic ]

  # Azure User-assigned Managed identity.
  [ managed_identity:
      [ client_id: <string> ]

# Configures the remote write request's TLS settings.
tls_config:
  [ <tls_config> ]
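Putting the new block together, a remote-write entry using the Azure AD option might look roughly like this (the endpoint URL and client ID are placeholders, not real values):

```yaml
remote_write:
  - url: https://example.monitor.azure.com/api/v1/write   # placeholder Azure Monitor endpoint
    azuread:
      cloud: AzurePublic
      managed_identity:
        client_id: 00000000-0000-0000-0000-000000000000   # placeholder identity
```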
@@ -44,6 +44,13 @@ tls_server_config:
  # CA certificate for client certificate authentication to the server.
  [ client_ca_file: <filename> ]

  # Verify that the client certificate has a Subject Alternate Name (SAN)
  # which is an exact match to an entry in this list, else terminate the
  # connection. SAN match can be one or multiple of the following: DNS,
  # IP, e-mail, or URI address from https://pkg.go.dev/crypto/x509#Certificate.
  [ client_allowed_sans:
    [ - <string> ] ]

  # Minimum TLS version that is acceptable.
  [ min_version: <string> | default = "TLS12" ]
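A sketch of a web configuration using the new option (file paths and the SAN entry are hypothetical):

```yaml
tls_server_config:
  cert_file: /etc/prometheus/server.crt
  key_file: /etc/prometheus/server.key
  client_auth_type: RequireAndVerifyClientCert
  client_ca_file: /etc/prometheus/client-ca.crt
  client_allowed_sans:
    - scraper.example.com        # only clients presenting this SAN are accepted
```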
@@ -407,7 +407,7 @@ $ curl -g 'http://localhost:9090/api/v1/query_exemplars?query=test_exemplar_metr
          "traceID": "EpTxMJ40fUus7aGY"
        },
        "value": "6",
        "timestamp": 1600096945.479,
        "timestamp": 1600096945.479
      }
    ]
  },
@@ -424,15 +424,15 @@ $ curl -g 'http://localhost:9090/api/v1/query_exemplars?query=test_exemplar_metr
          "traceID": "Olp9XHlq763ccsfa"
        },
        "value": "19",
        "timestamp": 1600096955.479,
        "timestamp": 1600096955.479
      },
      {
        "labels": {
          "traceID": "hCtjygkIHwAN9vs4"
        },
        "value": "20",
        "timestamp": 1600096965.489,
      },
        "timestamp": 1600096965.489
      }
    ]
  }
]
@@ -863,6 +863,7 @@ GET /api/v1/metadata

URL query parameters:

- `limit=<number>`: Maximum number of metrics to return.
- `limit_per_metric=<number>`: Maximum number of metadata to return per metric.
- `metric=<string>`: A metric name to filter metadata for. All metric metadata is retrieved if left empty.

The `data` section of the query result consists of an object where each key is a metric name and each value is a list of unique metadata objects, as exposed for that metric name across all targets.
@@ -898,6 +899,32 @@ curl -G http://localhost:9090/api/v1/metadata?limit=2
}
```

The following example returns only one metadata entry for each metric.

```json
curl -G http://localhost:9090/api/v1/metadata?limit_per_metric=1

{
  "status": "success",
  "data": {
    "cortex_ring_tokens": [
      {
        "type": "gauge",
        "help": "Number of tokens in the ring",
        "unit": ""
      }
    ],
    "http_requests_total": [
      {
        "type": "counter",
        "help": "Number of HTTP requests",
        "unit": ""
      }
    ]
  }
}
```

The following example returns metadata only for the metric `http_requests_total`.

```json
@@ -359,7 +359,7 @@ increase(http_requests_total{job="api-server"}[5m])
```

`increase` acts on native histograms by calculating a new histogram where each
compononent (sum and count of observations, buckets) is the increase between
component (sum and count of observations, buckets) is the increase between
the respective component in the first and last native histogram in
`v`. However, each element in `v` that contains a mix of float and native
histogram samples within the range, will be missing from the result vector.
@@ -7,16 +7,16 @@ require (
	github.com/go-kit/log v0.2.1
	github.com/gogo/protobuf v1.3.2
	github.com/golang/snappy v0.0.4
	github.com/influxdata/influxdb v1.11.0
	github.com/influxdata/influxdb v1.11.2
	github.com/prometheus/client_golang v1.15.0
	github.com/prometheus/common v0.42.0
	github.com/prometheus/prometheus v0.43.0
	github.com/stretchr/testify v1.8.2
	github.com/prometheus/prometheus v0.44.0
	github.com/stretchr/testify v1.8.4
)

require (
	github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 // indirect
	github.com/aws/aws-sdk-go v1.44.217 // indirect
	github.com/aws/aws-sdk-go v1.44.245 // indirect
	github.com/beorn7/perks v1.0.1 // indirect
	github.com/cespare/xxhash/v2 v2.2.0 // indirect
	github.com/davecgh/go-spew v1.1.1 // indirect

@@ -44,11 +44,11 @@ require (
	go.opentelemetry.io/otel/trace v1.14.0 // indirect
	go.uber.org/atomic v1.10.0 // indirect
	go.uber.org/goleak v1.2.1 // indirect
	golang.org/x/exp v0.0.0-20230307190834-24139beb5833 // indirect
	golang.org/x/net v0.8.0 // indirect
	golang.org/x/oauth2 v0.6.0 // indirect
	golang.org/x/sys v0.6.0 // indirect
	golang.org/x/text v0.8.0 // indirect
	golang.org/x/exp v0.0.0-20230321023759-10a507213a29 // indirect
	golang.org/x/net v0.9.0 // indirect
	golang.org/x/oauth2 v0.7.0 // indirect
	golang.org/x/sys v0.7.0 // indirect
	golang.org/x/text v0.9.0 // indirect
	golang.org/x/time v0.3.0 // indirect
	google.golang.org/appengine v1.6.7 // indirect
	google.golang.org/protobuf v1.30.0 // indirect
@@ -1,7 +1,7 @@
github.com/Azure/azure-sdk-for-go v65.0.0+incompatible h1:HzKLt3kIwMm4KeJYTdx9EbjRYTySD/t8i1Ee/W5EGXw=
github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
github.com/Azure/go-autorest/autorest v0.11.28 h1:ndAExarwr5Y+GaHE6VCaY1kyS/HwwGGyuimVhWsHOEM=
github.com/Azure/go-autorest/autorest/adal v0.9.22 h1:/GblQdIudfEM3AWWZ0mrYJQSd7JS4S/Mbzh6F0ov0Xc=
github.com/Azure/go-autorest/autorest/adal v0.9.23 h1:Yepx8CvFxwNKpH6ja7RZ+sKX+DWYNldbLiALMC3BTz8=
github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw=
github.com/Azure/go-autorest/autorest/to v0.4.0 h1:oXVqrxakqqV1UZdSazDOPOLvOIz+XA683u8EctwboHk=
github.com/Azure/go-autorest/autorest/validation v0.3.1 h1:AgyqjAd94fwNAoTjl/WQXg4VvFeRFpO+UhNyRXqF1ac=

@@ -19,8 +19,8 @@ github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 h1:s6gZFSlWYmbqAu
github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE=
github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA=
github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
github.com/aws/aws-sdk-go v1.44.217 h1:FcWC56MRl+k756aH3qeMQTylSdeJ58WN0iFz3fkyRz0=
github.com/aws/aws-sdk-go v1.44.217/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
github.com/aws/aws-sdk-go v1.44.245 h1:KtY2s4q31/kn33AdV63R5t77mdxsI7rq3YT7Mgo805M=
github.com/aws/aws-sdk-go v1.44.245/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=

@@ -35,15 +35,15 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE=
github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA=
github.com/digitalocean/godo v1.97.0 h1:p9w1yCcWMZcxFSLPToNGXA96WfUVLXqoHti6GzVomL4=
github.com/digitalocean/godo v1.98.0 h1:potyC1eD0N9n5/P4/WmJuKgg+OGYZOBWEW+/aKTX6QQ=
github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68=
github.com/docker/docker v23.0.1+incompatible h1:vjgvJZxprTTE1A37nm+CLNAdwu6xZekyoiVlUZEINcY=
github.com/docker/docker v23.0.4+incompatible h1:Kd3Bh9V/rO+XpTP/BLqM+gx8z7+Yb0AA2Ibj+nNo4ek=
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
github.com/edsrzf/mmap-go v1.1.0 h1:6EUwBLQ/Mcr1EYLE4Tn1VdW1A4ckqCQWZBw8Hr0kjpQ=
github.com/emicklei/go-restful/v3 v3.10.1 h1:rc42Y5YTp7Am7CS630D7JmhRjq4UlEUuEKfrDac4bSQ=
github.com/envoyproxy/go-control-plane v0.11.0 h1:jtLewhRR2vMRNnq2ZZUoCjUlgut+Y0+sDDWPOfwOi1o=
github.com/envoyproxy/protoc-gen-validate v0.9.1 h1:PS7VIOgmSVhWUEeZwTe7z7zouA22Cr590PzXKbZHOVY=
github.com/envoyproxy/protoc-gen-validate v0.10.1 h1:c0g45+xCJhdgFGw7a5QAfdS4byAbud7miNWJ1WwEVf8=
github.com/fatih/color v1.14.1 h1:qfhVLaG5s+nCROl1zJsZRxFeYrHLqWroPOQ8BWiNb4w=
github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk=
github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=

@@ -100,7 +100,7 @@ github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
github.com/gophercloud/gophercloud v1.2.0 h1:1oXyj4g54KBg/kFtCdMM6jtxSzeIyg8wv4z1HoGPp1E=
github.com/gophercloud/gophercloud v1.3.0 h1:RUKyCMiZoQR3VlVR5E3K7PK1AC3/qppsWYo6dtBiqs8=
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd h1:PpuIBO5P3e9hpqBD0O/HjhShYuM6XE0i/lbE6J94kww=
github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd/go.mod h1:M5qHK+eWfAv8VR/265dIuEpL3fNfeC21tXXp9itM24A=

@@ -114,13 +114,13 @@ github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+l
github.com/hashicorp/go-retryablehttp v0.7.2 h1:AcYqCvkpalPnPF2pn0KamgwamS42TqUDDYFRKq/RAd0=
github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc=
github.com/hashicorp/golang-lru v0.6.0 h1:uL2shRDx7RTrOrTCUZEGP/wJUFiUI8QT6E7z5o8jga4=
github.com/hashicorp/nomad/api v0.0.0-20230308192510-48e7d70fcd4b h1:EkuSTU8c/63q4LMayj8ilgg/4I5PXDFVcnqKfs9qcwI=
github.com/hashicorp/nomad/api v0.0.0-20230418003350-3067191c5197 h1:I5xhKLePXpXgM6pZ4xZNTiurLLS3sGuZrZFFzAbM67A=
github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY=
github.com/hetznercloud/hcloud-go v1.41.0 h1:KJGFRRc68QiVu4PrEP5BmCQVveCP2CM26UGQUKGpIUs=
github.com/hetznercloud/hcloud-go v1.42.0 h1:Es/CDOForQN3nOOP5Vxh1N/YHjpCg386iYEX5zCgi+A=
github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk=
github.com/influxdata/influxdb v1.11.0 h1:0X+ZsbcOWc6AEi5MHee9BYqXCKmz8IZsljrRYjmV8Qg=
github.com/influxdata/influxdb v1.11.0/go.mod h1:V93tJcidY0Zh0LtSONZWnXXGDyt20dtVf+Ddp4EnhaA=
github.com/ionos-cloud/sdk-go/v6 v6.1.4 h1:BJHhFA8Q1SZC7VOXqKKr2BV2ysQ2/4hlk1e4hZte7GY=
github.com/influxdata/influxdb v1.11.2 h1:qOF3uQN1mDfJNEKwbAgJsqehf8IXgKok2vlGm736oGo=
github.com/influxdata/influxdb v1.11.2/go.mod h1:eUMkLTE2vQwvSk6KGMrTBLKPaqSuczuelGbggigMPFw=
github.com/ionos-cloud/sdk-go/v6 v6.1.6 h1:0n4irdqNska+1s3YMCRhrAqKbibEgQ7SwwhAlHzYT5A=
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=

@@ -146,14 +146,14 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/linode/linodego v1.14.1 h1:uGxQyy0BidoEpLGdvfi4cPgEW+0YUFsEGrLEhcTfjNc=
github.com/linode/linodego v1.16.1 h1:5otq57M4PdHycPERRfSFZ0s1yz1ETVWGjCp3hh7+F9w=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
github.com/miekg/dns v1.1.51 h1:0+Xg7vObnhrz/4ZCZcZh7zPXlmU0aveS2HDBd0m0qSo=
github.com/miekg/dns v1.1.53 h1:ZBkuHr5dxHtB1caEOlZTLPo7D3L3TWckgUUs/RHfDxw=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=

@@ -169,7 +169,7 @@ github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRW
github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM=
github.com/ovh/go-ovh v1.3.0 h1:mvZaddk4E4kLcXhzb+cxBsMPYp2pHqiQpWYkInsuZPQ=
github.com/ovh/go-ovh v1.4.1 h1:VBGa5wMyQtTP7Zb+w97zRCh9sLtM/2YKRyy+MEJmWaM=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=

@@ -201,26 +201,22 @@ github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4O
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI=
github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY=
github.com/prometheus/prometheus v0.43.0 h1:18iCSfrbAHbXvYFvR38U1Pt4uZmU9SmDcCpCrBKUiGg=
github.com/prometheus/prometheus v0.43.0/go.mod h1:2BA14LgBeqlPuzObSEbh+Y+JwLH2GcqDlJKbF2sA6FM=
github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8=
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.14 h1:yFl3jyaSVLNYXlnNYM5z2pagEk1dYQhfr1p20T1NyKY=
github.com/prometheus/prometheus v0.44.0 h1:sgn8Fdx+uE5tHQn0/622swlk2XnIj6udoZCnbVjHIgc=
github.com/prometheus/prometheus v0.44.0/go.mod h1:aPsmIK3py5XammeTguyqTmuqzX/jeCdyOWWobLHNKQg=
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.15 h1:Y7xOFbD+3jaPw+VN7lkakNJ/pa+ZSQVFp1ONtJaBxns=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8=
github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/vultr/govultr/v2 v2.17.2 h1:gej/rwr91Puc/tgh+j33p/BLR16UrIPnSr+AIwYWZQs=
github.com/xhit/go-str2duration/v2 v2.1.0 h1:lxklc02Drh6ynqX+DdPyp5pCKLUQpRT8bp8Ydu2Bstc=
github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU=

@@ -245,12 +241,12 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.7.0 h1:AvwMYaRytfdeVt3u6mLaxYtErKYjxA2OXjJ1HHq6t3A=
golang.org/x/exp v0.0.0-20230307190834-24139beb5833 h1:SChBja7BCQewoTAU7IgvucQKMIXrEpFxNMs0spT3/5s=
golang.org/x/exp v0.0.0-20230307190834-24139beb5833/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=
golang.org/x/exp v0.0.0-20230321023759-10a507213a29 h1:ooxPy7fPvB4kwsA2h+iBNHkAbp/4JxTSwCmvdjEYmug=
golang.org/x/exp v0.0.0-20230321023759-10a507213a29/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.9.0 h1:KENHtAZL2y3NLMYZeHY9DW8HW8V+kQyJsY/V9JlKvCs=
golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=

@@ -267,12 +263,12 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ=
golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
golang.org/x/net v0.9.0 h1:aWJ/m6xSmxWBx+V0XRHTlrYrPG56jKsLdTFmsSsCzOM=
golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.6.0 h1:Lh8GPgSKBfWSwFvtuWOfeI3aAAnbXTSutYxJiOJFgIw=
golang.org/x/oauth2 v0.6.0/go.mod h1:ycmewcwgD4Rpr3eZJLSB4Kyyljb3qDh40vJ8STE5HKw=
golang.org/x/oauth2 v0.7.0 h1:qe6s0zUXlPX80/dITx3440hWZ7GwMwgDDyrSGTPJG/g=
golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=

@@ -299,20 +295,20 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU=
golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.6.0 h1:clScbb1cHjoCkyRbWwBEUZ5H/tIFu5TAXIqaZD0Gcjw=
golang.org/x/term v0.7.0 h1:BEvjmm5fURWqcfbSKTdpkDXYBrUS1c0m8agp14W48vQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68=
golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=

@@ -320,7 +316,7 @@ golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtn
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.7.0 h1:W4OVu8VVOaIO0yzWMNdepAulS7YfoS3Zabrm8DOXXU4=
golang.org/x/tools v0.8.0 h1:vSDcovVPld282ceKgDimkRSC8kpaH1dgyc9UMzlt84Y=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
File diff suppressed because one or more lines are too long (image, before: 25 KiB, after: 34 KiB)
File diff suppressed because one or more lines are too long (image, before: 38 KiB, after: 49 KiB)
@@ -20,6 +20,20 @@
            description: 'Prometheus %(prometheusName)s has failed to reload its configuration.' % $._config,
          },
        },
        {
          alert: 'PrometheusSDRefreshFailure',
          expr: |||
            increase(prometheus_sd_refresh_failures_total{%(prometheusSelector)s}[10m]) > 0
          ||| % $._config,
          'for': '20m',
          labels: {
            severity: 'warning',
          },
          annotations: {
            summary: 'Failed Prometheus SD refresh.',
            description: 'Prometheus %(prometheusName)s has failed to refresh SD with mechanism {{$labels.mechanism}}.' % $._config,
          },
        },
        {
          alert: 'PrometheusNotificationQueueRunningFull',
          expr: |||
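For reference, the mixin addition above renders to roughly the following alerting rule (assuming a plain `job="prometheus"` selector; the actual selector and instance name template come from `$._config`):

```yaml
groups:
  - name: prometheus
    rules:
      - alert: PrometheusSDRefreshFailure
        expr: increase(prometheus_sd_refresh_failures_total{job="prometheus"}[10m]) > 0
        for: 20m
        labels:
          severity: warning
        annotations:
          summary: Failed Prometheus SD refresh.
          description: Prometheus has failed to refresh SD with mechanism {{$labels.mechanism}}.
```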
go.mod (100 changed lines)
@@ -4,20 +4,22 @@ go 1.19

require (
	github.com/Azure/azure-sdk-for-go v65.0.0+incompatible
	github.com/Azure/go-autorest/autorest v0.11.28
	github.com/Azure/azure-sdk-for-go/sdk/azcore v1.3.1
	github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.1
	github.com/Azure/go-autorest/autorest v0.11.29
	github.com/Azure/go-autorest/autorest/adal v0.9.23
	github.com/DmitriyVTitov/size v1.5.0
	github.com/alecthomas/kingpin/v2 v2.3.2
	github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137
	github.com/aws/aws-sdk-go v1.44.245
	github.com/aws/aws-sdk-go v1.44.284
	github.com/cespare/xxhash/v2 v2.2.0
	github.com/dennwc/varint v1.0.0
	github.com/dgraph-io/ristretto v0.1.1
	github.com/digitalocean/godo v1.99.0
	github.com/docker/docker v23.0.4+incompatible
	github.com/docker/docker v24.0.2+incompatible
	github.com/edsrzf/mmap-go v1.1.0
	github.com/envoyproxy/go-control-plane v0.11.0
	github.com/envoyproxy/protoc-gen-validate v0.10.1
	github.com/envoyproxy/protoc-gen-validate v1.0.1
	github.com/fsnotify/fsnotify v1.6.0
	github.com/go-kit/log v0.2.1
	github.com/go-logfmt/logfmt v0.6.0

@@ -25,18 +27,18 @@ require (
	github.com/go-zookeeper/zk v1.0.3
	github.com/gogo/protobuf v1.3.2
	github.com/golang/snappy v0.0.4
	github.com/google/pprof v0.0.0-20230406165453-00490a63f317
	github.com/gophercloud/gophercloud v1.3.0
	github.com/google/pprof v0.0.0-20230602150820-91b7bce49751
	github.com/gophercloud/gophercloud v1.4.0
	github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd
	github.com/grpc-ecosystem/grpc-gateway v1.16.0
	github.com/hashicorp/consul/api v1.20.0
	github.com/hashicorp/nomad/api v0.0.0-20230418003350-3067191c5197
	github.com/hashicorp/consul/api v1.21.0
	github.com/hashicorp/nomad/api v0.0.0-20230605233119-67e39d5d248f
	github.com/hetznercloud/hcloud-go v1.45.1
	github.com/ionos-cloud/sdk-go/v6 v6.1.6
	github.com/ionos-cloud/sdk-go/v6 v6.1.7
	github.com/json-iterator/go v1.1.12
	github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b
	github.com/linode/linodego v1.16.1
	github.com/miekg/dns v1.1.53
	github.com/linode/linodego v1.17.0
	github.com/miekg/dns v1.1.54
	github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f
	github.com/oklog/run v1.1.0
	github.com/oklog/ulid v1.3.1

@@ -44,34 +46,34 @@ require (
	github.com/pkg/errors v0.9.1
	github.com/prometheus/alertmanager v0.25.0
	github.com/prometheus/client_golang v1.15.1
	github.com/prometheus/client_model v0.3.0
	github.com/prometheus/common v0.42.0
	github.com/prometheus/client_model v0.4.0
	github.com/prometheus/common v0.44.0
	github.com/prometheus/common/assets v0.2.0
	github.com/prometheus/common/sigv4 v0.1.0
	github.com/prometheus/exporter-toolkit v0.9.1
	github.com/scaleway/scaleway-sdk-go v1.0.0-beta.15
	github.com/prometheus/exporter-toolkit v0.10.0
	github.com/scaleway/scaleway-sdk-go v1.0.0-beta.17
	github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749
	github.com/stretchr/testify v1.8.2
	github.com/stretchr/testify v1.8.4
	github.com/vultr/govultr/v2 v2.17.2
	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.40.0
	go.opentelemetry.io/otel v1.14.0
	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.14.0
	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.14.0
	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.14.0
	go.opentelemetry.io/otel/sdk v1.14.0
	go.opentelemetry.io/otel/trace v1.14.0
	go.uber.org/atomic v1.10.0
	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.42.0
	go.opentelemetry.io/otel v1.16.0
	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.16.0
	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.16.0
	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.16.0
	go.opentelemetry.io/otel/sdk v1.16.0
	go.opentelemetry.io/otel/trace v1.16.0
	go.uber.org/atomic v1.11.0
	go.uber.org/automaxprocs v1.5.2
	go.uber.org/goleak v1.2.1
	golang.org/x/net v0.9.0
	golang.org/x/oauth2 v0.7.0
	golang.org/x/sync v0.1.0
	golang.org/x/sys v0.7.0
	golang.org/x/net v0.11.0
	golang.org/x/oauth2 v0.9.0
	golang.org/x/sync v0.2.0
	golang.org/x/sys v0.9.0
	golang.org/x/time v0.3.0
	golang.org/x/tools v0.8.0
	golang.org/x/tools v0.9.3
	google.golang.org/api v0.114.0
	google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4
	google.golang.org/grpc v1.53.0
	google.golang.org/genproto v0.0.0-20230320184635-7606e756e683
	google.golang.org/grpc v1.55.0
	google.golang.org/protobuf v1.30.0
	gopkg.in/yaml.v2 v2.4.0
	gopkg.in/yaml.v3 v3.0.1

@@ -79,21 +81,25 @@ require (
	k8s.io/apimachinery v0.26.2
	k8s.io/client-go v0.26.2
	k8s.io/klog v1.0.0
	k8s.io/klog/v2 v2.90.1
	k8s.io/klog/v2 v2.100.1
)

require (
	cloud.google.com/go/compute/metadata v0.2.3 // indirect
	github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 // indirect
	github.com/AzureAD/microsoft-authentication-library-for-go v0.8.1 // indirect
	github.com/coreos/go-systemd/v22 v22.5.0 // indirect
	github.com/dustin/go-humanize v1.0.0 // indirect
	github.com/hashicorp/errwrap v1.1.0 // indirect
	github.com/hashicorp/go-multierror v1.1.1 // indirect
	github.com/rogpeppe/go-internal v1.10.0 // indirect
	github.com/kylelemons/godebug v1.1.0 // indirect
	github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect
	github.com/stretchr/objx v0.5.0 // indirect
	github.com/xhit/go-str2duration/v2 v2.1.0 // indirect
)

require (
	cloud.google.com/go/compute v1.18.0 // indirect
	cloud.google.com/go/compute v1.19.0 // indirect
	github.com/Azure/go-autorest v14.2.0+incompatible // indirect
	github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect
	github.com/Azure/go-autorest/autorest/to v0.4.0 // indirect

@@ -104,8 +110,8 @@ require (
	github.com/armon/go-metrics v0.4.1 // indirect
	github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
	github.com/beorn7/perks v1.0.1 // indirect
	github.com/cenkalti/backoff/v4 v4.2.0 // indirect
	github.com/cncf/xds/go v0.0.0-20230112175826-46e39c7b9b43 // indirect
	github.com/cenkalti/backoff/v4 v4.2.1 // indirect
	github.com/cncf/xds/go v0.0.0-20230428030218-4003588d1b74 // indirect
	github.com/davecgh/go-spew v1.1.1 // indirect
	github.com/docker/distribution v2.8.1+incompatible // indirect
	github.com/docker/go-connections v0.4.0 // indirect

@@ -116,7 +122,7 @@ require (
	github.com/felixge/httpsnoop v1.0.3 // indirect
	github.com/ghodss/yaml v1.0.0 // indirect
	github.com/go-kit/kit v0.12.0 // indirect
	github.com/go-logr/logr v1.2.3 // indirect
	github.com/go-logr/logr v1.2.4 // indirect
	github.com/go-logr/stdr v1.2.2 // indirect
	github.com/go-openapi/analysis v0.21.4 // indirect
	github.com/go-openapi/errors v0.20.3 // indirect

@@ -128,18 +134,18 @@ require (
	github.com/go-openapi/validate v0.22.1 // indirect
	github.com/go-resty/resty/v2 v2.7.0 // indirect
	github.com/golang-jwt/jwt/v4 v4.5.0 // indirect
	github.com/golang/glog v1.0.0 // indirect
	github.com/golang/glog v1.1.0 // indirect
	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
	github.com/golang/protobuf v1.5.3 // indirect
	github.com/google/gnostic v0.6.9 // indirect
	github.com/google/go-cmp v0.5.9 // indirect
	github.com/google/go-querystring v1.1.0 // indirect
	github.com/google/gofuzz v1.2.0 // indirect
	github.com/google/uuid v1.3.0 // indirect
	github.com/google/uuid v1.3.0
	github.com/googleapis/enterprise-certificate-proxy v0.2.3 // indirect
	github.com/googleapis/gax-go/v2 v2.7.1 // indirect
	github.com/gorilla/websocket v1.5.0 // indirect
	github.com/grpc-ecosystem/grpc-gateway/v2 v2.15.2 // indirect
	github.com/grpc-ecosystem/grpc-gateway/v2 v2.15.0 // indirect
	github.com/hashicorp/cronexpr v1.1.1 // indirect
	github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
	github.com/hashicorp/go-hclog v1.4.0 // indirect

@@ -171,19 +177,19 @@ require (
	github.com/spf13/pflag v1.0.5 // indirect
	go.mongodb.org/mongo-driver v1.11.3 // indirect
	go.opencensus.io v0.24.0 // indirect
	go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.14.0 // indirect
	go.opentelemetry.io/otel/metric v0.37.0 // indirect
	go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.16.0 // indirect
	go.opentelemetry.io/otel/metric v1.16.0 // indirect
	go.opentelemetry.io/proto/otlp v0.19.0 // indirect
	golang.org/x/crypto v0.7.0 // indirect
	golang.org/x/exp v0.0.0-20230321023759-10a507213a29
	golang.org/x/crypto v0.10.0 // indirect
	golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1
	golang.org/x/mod v0.10.0 // indirect
	golang.org/x/term v0.7.0 // indirect
	golang.org/x/text v0.9.0 // indirect
	golang.org/x/term v0.9.0 // indirect
	golang.org/x/text v0.10.0 // indirect
	google.golang.org/appengine v1.6.7 // indirect
	gopkg.in/inf.v0 v0.9.1 // indirect
	gopkg.in/ini.v1 v1.67.0 // indirect
	gotest.tools/v3 v3.0.3 // indirect
	k8s.io/kube-openapi v0.0.0-20230303024457-afdc3dddf62d // indirect
	k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f // indirect
	k8s.io/utils v0.0.0-20230308161112-d77c459e9343 // indirect
	sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
	sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
go.sum (193 changed lines)
@@ -19,8 +19,8 @@ cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvf
cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
cloud.google.com/go/compute v1.18.0 h1:FEigFqoDbys2cvFkZ9Fjq4gnHBP55anJ0yQyau2f9oY=
cloud.google.com/go/compute v1.18.0/go.mod h1:1X7yHxec2Ga+Ss6jPyjxRxpu2uu7PLgsOVXvgU0yacs=
cloud.google.com/go/compute v1.19.0 h1:+9zda3WGgW1ZSTlVppLCYFIr48Pa35q1uG2N1itbCEQ=
cloud.google.com/go/compute v1.19.0/go.mod h1:rikpw2y+UMidAe9tISo04EHNOIf42RLYF/q8Bs93scU=
cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY=
cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=

@@ -38,13 +38,19 @@ cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/Azure/azure-sdk-for-go v65.0.0+incompatible h1:HzKLt3kIwMm4KeJYTdx9EbjRYTySD/t8i1Ee/W5EGXw=
github.com/Azure/azure-sdk-for-go v65.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.3.1 h1:gVXuXcWd1i4C2Ruxe321aU+IKGaStvGB/S90PUPB/W8=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.3.1/go.mod h1:DffdKW9RFqa5VgmsjUOsS7UE7eiA5iAvYUs63bhKQ0M=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.1 h1:T8quHYlUGyb/oqtSTwqlCr1ilJHrDv+ZtpSfo+hm1BU=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.1/go.mod h1:gLa1CL2RNE4s7M3yopJ/p0iq5DdY6Yv5ZUt9MTRZOQM=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 h1:sXr+ck84g/ZlZUOZiNELInmMgOsuGwdjjVkEIde0OtY=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0/go.mod h1:okt5dMMTOFjX/aovMlrjvvXoPMBVSPzk9185BT0+eZM=
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8=
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
github.com/Azure/go-autorest/autorest v0.11.28 h1:ndAExarwr5Y+GaHE6VCaY1kyS/HwwGGyuimVhWsHOEM=
github.com/Azure/go-autorest/autorest v0.11.28/go.mod h1:MrkzG3Y3AH668QyF9KRk5neJnGgmhQ6krbhR8Q5eMvA=
github.com/Azure/go-autorest/autorest/adal v0.9.18/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ=
github.com/Azure/go-autorest/autorest v0.11.29 h1:I4+HL/JDvErx2LjyzaVxllw2lRDB5/BT2Bm4g20iqYw=
github.com/Azure/go-autorest/autorest v0.11.29/go.mod h1:ZtEzC4Jy2JDrZLxvWs8LrBWEBycl1hbT1eknI8MtfAs=
github.com/Azure/go-autorest/autorest/adal v0.9.22/go.mod h1:XuAbAEUv2Tta//+voMI038TrJBqjKam0me7qR+L8Cmk=
github.com/Azure/go-autorest/autorest/adal v0.9.23 h1:Yepx8CvFxwNKpH6ja7RZ+sKX+DWYNldbLiALMC3BTz8=
github.com/Azure/go-autorest/autorest/adal v0.9.23/go.mod h1:5pcMqFkdPhviJdlEy3kC/v1ZLnQl0MH6XA5YCcMhy4c=
github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw=

@@ -60,6 +66,8 @@ github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+Z
github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo=
github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
github.com/AzureAD/microsoft-authentication-library-for-go v0.8.1 h1:oPdPEZFSbl7oSPEAIPMPBMUmiL+mqgzBJwM/9qYcwNg=
github.com/AzureAD/microsoft-authentication-library-for-go v0.8.1/go.mod h1:4qFor3D/HDsvBME35Xy9rwW9DecL+M2sNw1ybjPtwA0=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=

@@ -100,8 +108,8 @@ github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:W
github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU=
github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
github.com/aws/aws-sdk-go v1.44.245 h1:KtY2s4q31/kn33AdV63R5t77mdxsI7rq3YT7Mgo805M=
github.com/aws/aws-sdk-go v1.44.245/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
github.com/aws/aws-sdk-go v1.44.284 h1:Oc5Kubi43/VCkerlt3ZU3KpBju6BpNkoG3s7E8vj/O8=
github.com/aws/aws-sdk-go v1.44.284/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=

@@ -111,8 +119,8 @@ github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kB
github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0=
github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ=
github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
github.com/cenkalti/backoff/v4 v4.2.0 h1:HN5dHm3WBOgndBH6E8V0q2jIYIR3s9yglV8k/+MN3u4=
github.com/cenkalti/backoff/v4 v4.2.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM=
github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=

@@ -132,8 +140,8 @@ github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWH
github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20230112175826-46e39c7b9b43 h1:XP+uhjN0yBCN/tPkr8Z0BNDc5rZam9RG6UWyf2FrSQ0=
github.com/cncf/xds/go v0.0.0-20230112175826-46e39c7b9b43/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20230428030218-4003588d1b74 h1:zlUubfBUxApscKFsF4VSvvfhsBNTBu0eF/ddvpo96yk=
github.com/cncf/xds/go v0.0.0-20230428030218-4003588d1b74/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=

@@ -160,8 +168,8 @@ github.com/digitalocean/godo v1.99.0/go.mod h1:SsS2oXo2rznfM/nORlZ/6JaUJZFhmKTib
github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=
github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68=
github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/docker v23.0.4+incompatible h1:Kd3Bh9V/rO+XpTP/BLqM+gx8z7+Yb0AA2Ibj+nNo4ek=
github.com/docker/docker v23.0.4+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v24.0.2+incompatible h1:eATx+oLz9WdNVkQrr0qjQ8HvRJ4bOOxfzEo8R+dA3cg=
github.com/docker/docker v24.0.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=

@@ -188,8 +196,8 @@ github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.
github.com/envoyproxy/go-control-plane v0.11.0 h1:jtLewhRR2vMRNnq2ZZUoCjUlgut+Y0+sDDWPOfwOi1o=
github.com/envoyproxy/go-control-plane v0.11.0/go.mod h1:VnHyVMpzcLvCFt9yUz1UnCwHLhwx1WguiVDV7pTG/tI=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/envoyproxy/protoc-gen-validate v0.10.1 h1:c0g45+xCJhdgFGw7a5QAfdS4byAbud7miNWJ1WwEVf8=
github.com/envoyproxy/protoc-gen-validate v0.10.1/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss=
github.com/envoyproxy/protoc-gen-validate v1.0.1 h1:kt9FtLiooDc0vbwTLhdg3dyNX1K9Qwa1EK9LcD4jVUQ=
github.com/envoyproxy/protoc-gen-validate v1.0.1/go.mod h1:0vj8bNkYbSTNS2PIyH87KZaeN4x9zpL9Qt8fQC7d+vs=
github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84=
github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=

@@ -224,8 +232,8 @@ github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG
github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4=
github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0=
github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ=
github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-openapi/analysis v0.21.2/go.mod h1:HZwRk4RRisyG8vx2Oe6aqeSQcoxRp47Xkp3+K6q+LdY=

@@ -301,12 +309,12 @@ github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zV
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg=
github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ=
github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4=
github.com/golang/glog v1.1.0 h1:/d3pCKDPWNnvIWe0vVUpNP32qc8U3PDVxySP/y360qE=
github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ=
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=

@@ -375,8 +383,8 @@ github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hf
github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20230406165453-00490a63f317 h1:hFhpt7CTmR3DX+b4R19ydQFtofxT0Sv3QsKNMVQYTMQ=
github.com/google/pprof v0.0.0-20230406165453-00490a63f317/go.mod h1:79YE0hCXdHag9sBkw2o+N/YnZtTkXi0UT9Nnixa5eYk=
github.com/google/pprof v0.0.0-20230602150820-91b7bce49751 h1:hR7/MlvK23p6+lIw9SN1TigNLn9ZnF3W4SYRKq2gAHs=
github.com/google/pprof v0.0.0-20230602150820-91b7bce49751/go.mod h1:Jh3hGz2jkYak8qXPD19ryItVnUgpgeqzdkY/D0EaeuA=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=

@@ -389,8 +397,8 @@ github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/googleapis/gax-go/v2 v2.7.1 h1:gF4c0zjUP2H/s/hEGyLA3I0fA2ZWjzYiONAD6cvPr8A=
github.com/googleapis/gax-go/v2 v2.7.1/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38/qKbhSAKP6QI=
github.com/gophercloud/gophercloud v1.3.0 h1:RUKyCMiZoQR3VlVR5E3K7PK1AC3/qppsWYo6dtBiqs8=
github.com/gophercloud/gophercloud v1.3.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM=
github.com/gophercloud/gophercloud v1.4.0 h1:RqEu43vaX0lb0LanZr5BylK5ICVxjpFFoc0sxivyuHU=
github.com/gophercloud/gophercloud v1.4.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=

@@ -406,11 +414,11 @@ github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t
github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.15.2 h1:gDLXvp5S9izjldquuoAhDzccbskOL6tDC5jMSyx3zxE=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.15.2/go.mod h1:7pdNwVWBBHGiCxa9lAszqCJMbfTISJ7oMftp8+UGV08=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.15.0 h1:1JYBfzqrWPcCclBwxFCPAou9n+q86mfnu7NAeHfte7A=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.15.0/go.mod h1:YDZoGHuwE+ov0c8smSH49WLF3F2LaWnYYuDVd+EWrc0=
github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE=
github.com/hashicorp/consul/api v1.20.0 h1:9IHTjNVSZ7MIwjlW3N3a7iGiykCMDpxZu8jsxFJh0yc=
github.com/hashicorp/consul/api v1.20.0/go.mod h1:nR64eD44KQ59Of/ECwt2vUmIK2DKsDzAwTmwmLl8Wpo=
github.com/hashicorp/consul/api v1.21.0 h1:WMR2JiyuaQWRAMFaOGiYfY4Q4HRpyYRe/oYQofjyduM=
github.com/hashicorp/consul/api v1.21.0/go.mod h1:f8zVJwBcLdr1IQnfdfszjUM0xzp31Zl3bpws3pL9uFM=
github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
github.com/hashicorp/consul/sdk v0.13.1 h1:EygWVWWMczTzXGpO93awkHFzfUka6hLYJ0qhETd+6lY=
github.com/hashicorp/cronexpr v1.1.1 h1:NJZDd87hGXjoZBdvyCF9mX4DCq5Wy7+A/w+A7q0wn6c=

@@ -459,8 +467,8 @@ github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/
github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
github.com/hashicorp/memberlist v0.5.0 h1:EtYPN8DpAURiapus508I4n9CzHs2W+8NZGbmmR/prTM=
github.com/hashicorp/memberlist v0.5.0/go.mod h1:yvyXLpo0QaGE59Y7hDTsTzDD25JYBZ4mHgHUZ8lrOI0=
github.com/hashicorp/nomad/api v0.0.0-20230418003350-3067191c5197 h1:I5xhKLePXpXgM6pZ4xZNTiurLLS3sGuZrZFFzAbM67A=
github.com/hashicorp/nomad/api v0.0.0-20230418003350-3067191c5197/go.mod h1:2TCrNvonL09r7EiQ6M2rNt+Cmjbn1QbzchFoTWJFpj4=
github.com/hashicorp/nomad/api v0.0.0-20230605233119-67e39d5d248f h1:yxjcAZRuYymIDC0W4IQHgTe9EQdu2BsjPlVmKwyVZT4=
github.com/hashicorp/nomad/api v0.0.0-20230605233119-67e39d5d248f/go.mod h1:Xjd3OXUTfsWbCCBsQd3EdfPTz5evDi+fxqdvpN+WqQg=
github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY=
github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4=

@@ -473,8 +481,8 @@ github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk=
github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo=
github.com/ionos-cloud/sdk-go/v6 v6.1.6 h1:0n4irdqNska+1s3YMCRhrAqKbibEgQ7SwwhAlHzYT5A=
github.com/ionos-cloud/sdk-go/v6 v6.1.6/go.mod h1:EzEgRIDxBELvfoa/uBN0kOQaqovLjUWEB7iW4/Q+t4k=
github.com/ionos-cloud/sdk-go/v6 v6.1.7 h1:uVG1Q/ZDJ7YmCI9Oevpue9xJEH5UrUMyXv8gm7NTxIw=
github.com/ionos-cloud/sdk-go/v6 v6.1.7/go.mod h1:EzEgRIDxBELvfoa/uBN0kOQaqovLjUWEB7iW4/Q+t4k=
github.com/jarcoal/httpmock v1.3.0 h1:2RJ8GP0IIaWwcC9Fp2BmVi8Kog3v2Hn7VXM3fTd+nuc=
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
|
||||
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
|
||||
|
@ -521,10 +529,12 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
|||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
|
||||
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
|
||||
github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM=
|
||||
github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4=
|
||||
github.com/linode/linodego v1.16.1 h1:5otq57M4PdHycPERRfSFZ0s1yz1ETVWGjCp3hh7+F9w=
|
||||
github.com/linode/linodego v1.16.1/go.mod h1:aESRAbpLY9R6IA1WGAWHikRI9DU9Lhesapv1MhKmPHM=
|
||||
github.com/linode/linodego v1.17.0 h1:aWS98f0jUoY2lhsEuBxRdVkqyGM0nazPd68AEDF0EvU=
|
||||
github.com/linode/linodego v1.17.0/go.mod h1:/omzPxie0/YI6S0sTw1q47qDt5IYSlbO/infRR4UG+A=
|
||||
github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ=
|
||||
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||
|
@ -557,8 +567,8 @@ github.com/maxatome/go-testdeep v1.12.0 h1:Ql7Go8Tg0C1D/uMMX59LAoYK7LffeJQ6X2T04
|
|||
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
|
||||
github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso=
|
||||
github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI=
|
||||
github.com/miekg/dns v1.1.53 h1:ZBkuHr5dxHtB1caEOlZTLPo7D3L3TWckgUUs/RHfDxw=
|
||||
github.com/miekg/dns v1.1.53/go.mod h1:uInx36IzPl7FYnDcMeVWxj9byh7DutNykX4G9Sj60FY=
|
||||
github.com/miekg/dns v1.1.54 h1:5jon9mWcb0sFJGpnI99tOMhCPyJ+RPVz5b63MQG0VWI=
|
||||
github.com/miekg/dns v1.1.54/go.mod h1:uInx36IzPl7FYnDcMeVWxj9byh7DutNykX4G9Sj60FY=
|
||||
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
|
||||
github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI=
|
||||
github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
|
||||
|
@ -636,6 +646,8 @@ github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAv
|
|||
github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac=
|
||||
github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc=
|
||||
github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
|
||||
github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU=
|
||||
github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI=
|
||||
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||
|
@ -663,8 +675,8 @@ github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:
|
|||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4=
|
||||
github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w=
|
||||
github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY=
|
||||
github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU=
|
||||
github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||
github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA=
|
||||
|
@ -672,14 +684,14 @@ github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8b
|
|||
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
|
||||
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
|
||||
github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
|
||||
github.com/prometheus/common v0.42.0 h1:EKsfXEYo4JpWMHH5cg+KOUWeuJSov1Id8zGR8eeI1YM=
|
||||
github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc=
|
||||
github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY=
|
||||
github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY=
|
||||
github.com/prometheus/common/assets v0.2.0 h1:0P5OrzoHrYBOSM1OigWL3mY8ZvV2N4zIE/5AahrSrfM=
|
||||
github.com/prometheus/common/assets v0.2.0/go.mod h1:D17UVUE12bHbim7HzwUvtqm6gwBEaDQ0F+hIGbFbccI=
|
||||
github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4=
|
||||
github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI=
|
||||
github.com/prometheus/exporter-toolkit v0.9.1 h1:cNkC01riqiOS+kh3zdnNwRsbe/Blh0WwK3ij5rPJ9Sw=
|
||||
github.com/prometheus/exporter-toolkit v0.9.1/go.mod h1:iFlTmFISCix0vyuyBmm0UqOUCTao9+RsAsKJP3YM9ec=
|
||||
github.com/prometheus/exporter-toolkit v0.10.0 h1:yOAzZTi4M22ZzVxD+fhy1URTuNRj/36uQJJ5S8IPza8=
|
||||
github.com/prometheus/exporter-toolkit v0.10.0/go.mod h1:+sVFzuvV5JDyw+Ih6p3zFxZNVnKQa3x5qPmDSiPu4ZY=
|
||||
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
|
||||
|
@ -695,15 +707,14 @@ github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR
|
|||
github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
|
||||
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
|
||||
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
|
||||
github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E=
|
||||
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.15 h1:Y7xOFbD+3jaPw+VN7lkakNJ/pa+ZSQVFp1ONtJaBxns=
|
||||
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.15/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg=
|
||||
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.17 h1:1WuWJu7/e8SqK+uQl7lfk/N/oMZTL2NE/TJsNKRNMc4=
|
||||
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.17/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg=
|
||||
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I=
|
||||
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
|
||||
github.com/shoenig/test v0.6.3 h1:GVXWJFk9PiOjN0KoJ7VrJGH6uLPnqxR7/fe3HUPfE0c=
|
||||
github.com/shoenig/test v0.6.6 h1:Oe8TPH9wAbv++YPNDKJWUnI8Q4PPWCx3UbOfH+FxiMU=
|
||||
github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749 h1:bUGsEnyNbVPw06Bs80sCeARAlK8lhwqGyi6UT8ymuGk=
|
||||
github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg=
|
||||
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
|
||||
|
@ -745,8 +756,9 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
|
|||
github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals=
|
||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8=
|
||||
github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
|
||||
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
|
||||
github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4=
|
||||
github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
|
||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
|
||||
|
@ -788,31 +800,31 @@ go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
|||
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
|
||||
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.40.0 h1:lE9EJyw3/JhrjWH/hEy9FptnalDQgj7vpbgC2KCCCxE=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.40.0/go.mod h1:pcQ3MM3SWvrA71U4GDqv9UFDJ3HQsW7y5ZO3tDTlUdI=
|
||||
go.opentelemetry.io/otel v1.14.0 h1:/79Huy8wbf5DnIPhemGB+zEPVwnN6fuQybr/SRXa6hM=
|
||||
go.opentelemetry.io/otel v1.14.0/go.mod h1:o4buv+dJzx8rohcUeRmWUZhqupFvzWis188WlggnNeU=
|
||||
go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.14.0 h1:/fXHZHGvro6MVqV34fJzDhi7sHGpX3Ej/Qjmfn003ho=
|
||||
go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.14.0/go.mod h1:UFG7EBMRdXyFstOwH028U0sVf+AvukSGhF0g8+dmNG8=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.14.0 h1:TKf2uAs2ueguzLaxOCBXNpHxfO/aC7PAdDsSH0IbeRQ=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.14.0/go.mod h1:HrbCVv40OOLTABmOn1ZWty6CHXkU8DK/Urc43tHug70=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.14.0 h1:ap+y8RXX3Mu9apKVtOkM6WSFESLM8K3wNQyOU8sWHcc=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.14.0/go.mod h1:5w41DY6S9gZrbjuq6Y+753e96WfPha5IcsOSZTtullM=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.14.0 h1:3jAYbRHQAqzLjd9I4tzxwJ8Pk/N6AqBcF6m1ZHrxG94=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.14.0/go.mod h1:+N7zNjIJv4K+DeX67XXET0P+eIciESgaFDBqh+ZJFS4=
|
||||
go.opentelemetry.io/otel/metric v0.37.0 h1:pHDQuLQOZwYD+Km0eb657A25NaRzy0a+eLyKfDXedEs=
|
||||
go.opentelemetry.io/otel/metric v0.37.0/go.mod h1:DmdaHfGt54iV6UKxsV9slj2bBRJcKC1B1uvDLIioc1s=
|
||||
go.opentelemetry.io/otel/sdk v1.14.0 h1:PDCppFRDq8A1jL9v6KMI6dYesaq+DFcDZvjsoGvxGzY=
|
||||
go.opentelemetry.io/otel/sdk v1.14.0/go.mod h1:bwIC5TjrNG6QDCHNWvW4HLHtUQ4I+VQDsnjhvyZCALM=
|
||||
go.opentelemetry.io/otel/trace v1.14.0 h1:wp2Mmvj41tDsyAJXiWDWpfNsOiIyd38fy85pyKcFq/M=
|
||||
go.opentelemetry.io/otel/trace v1.14.0/go.mod h1:8avnQLK+CG77yNLUae4ea2JDQ6iT+gozhnZjy/rw9G8=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.42.0 h1:pginetY7+onl4qN1vl0xW/V/v6OBZ0vVdH+esuJgvmM=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.42.0/go.mod h1:XiYsayHc36K3EByOO6nbAXnAWbrUxdjUROCEeeROOH8=
|
||||
go.opentelemetry.io/otel v1.16.0 h1:Z7GVAX/UkAXPKsy94IU+i6thsQS4nb7LviLpnaNeW8s=
|
||||
go.opentelemetry.io/otel v1.16.0/go.mod h1:vl0h9NUa1D5s1nv3A5vZOYWn8av4K8Ml6JDeHrT/bx4=
|
||||
go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.16.0 h1:t4ZwRPU+emrcvM2e9DHd0Fsf0JTPVcbfa/BhTDF03d0=
|
||||
go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.16.0/go.mod h1:vLarbg68dH2Wa77g71zmKQqlQ8+8Rq3GRG31uc0WcWI=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.16.0 h1:cbsD4cUcviQGXdw8+bo5x2wazq10SKz8hEbtCRPcU78=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.16.0/go.mod h1:JgXSGah17croqhJfhByOLVY719k1emAXC8MVhCIJlRs=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.16.0 h1:TVQp/bboR4mhZSav+MdgXB8FaRho1RC8UwVn3T0vjVc=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.16.0/go.mod h1:I33vtIe0sR96wfrUcilIzLoA3mLHhRmz9S9Te0S3gDo=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.16.0 h1:iqjq9LAB8aK++sKVcELezzn655JnBNdsDhghU4G/So8=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.16.0/go.mod h1:hGXzO5bhhSHZnKvrDaXB82Y9DRFour0Nz/KrBh7reWw=
|
||||
go.opentelemetry.io/otel/metric v1.16.0 h1:RbrpwVG1Hfv85LgnZ7+txXioPDoh6EdbZHo26Q3hqOo=
|
||||
go.opentelemetry.io/otel/metric v1.16.0/go.mod h1:QE47cpOmkwipPiefDwo2wDzwJrlfxxNYodqc4xnGCo4=
|
||||
go.opentelemetry.io/otel/sdk v1.16.0 h1:Z1Ok1YsijYL0CSJpHt4cS3wDDh7p572grzNrBMiMWgE=
|
||||
go.opentelemetry.io/otel/sdk v1.16.0/go.mod h1:tMsIuKXuuIWPBAOrH+eHtvhTL+SntFtXF9QD68aP6p4=
|
||||
go.opentelemetry.io/otel/trace v1.16.0 h1:8JRpaObFoW0pxuVPapkgH8UhHQj+bJW8jJsCZEu5MQs=
|
||||
go.opentelemetry.io/otel/trace v1.16.0/go.mod h1:Yt9vYq1SdNz3xdjZZK7wcXv1qv2pwLkqr2QVwea0ef0=
|
||||
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
|
||||
go.opentelemetry.io/proto/otlp v0.19.0 h1:IVN6GR+mhC4s5yfcTbmzHYODqvWAp3ZedA2SJPI1Nnw=
|
||||
go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U=
|
||||
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||
go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
|
||||
go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ=
|
||||
go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
|
||||
go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
|
||||
go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
|
||||
go.uber.org/automaxprocs v1.5.2 h1:2LxUOGiR3O6tw8ui5sZa2LAaHnsviZdVOUZw4fvbnME=
|
||||
go.uber.org/automaxprocs v1.5.2/go.mod h1:eRbA25aqJrxAbsLO0xy5jVwPt7FQnRgjW+efnwa1WM0=
|
||||
go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A=
|
||||
|
@ -838,8 +850,8 @@ golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0
|
|||
golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
|
||||
golang.org/x/crypto v0.7.0 h1:AvwMYaRytfdeVt3u6mLaxYtErKYjxA2OXjJ1HHq6t3A=
|
||||
golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU=
|
||||
golang.org/x/crypto v0.10.0 h1:LKqV2xt9+kDzSTfOhx4FrkEBcMrAgHSYgzywV9zcGmM=
|
||||
golang.org/x/crypto v0.10.0/go.mod h1:o4eNf7Ede1fv+hwOwZsTHl9EsPFO6q6ZvYR8vYfY45I=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
|
||||
|
@ -850,8 +862,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0
|
|||
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
|
||||
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
|
||||
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
|
||||
golang.org/x/exp v0.0.0-20230321023759-10a507213a29 h1:ooxPy7fPvB4kwsA2h+iBNHkAbp/4JxTSwCmvdjEYmug=
|
||||
golang.org/x/exp v0.0.0-20230321023759-10a507213a29/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=
|
||||
golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1 h1:k/i9J1pBpvlfR+9QsetwPyERsqu1GIbi967PQMq3Ivc=
|
||||
golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w=
|
||||
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
|
||||
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
|
@ -922,8 +934,8 @@ golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qx
|
|||
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||
golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
|
||||
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
|
||||
golang.org/x/net v0.9.0 h1:aWJ/m6xSmxWBx+V0XRHTlrYrPG56jKsLdTFmsSsCzOM=
|
||||
golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns=
|
||||
golang.org/x/net v0.11.0 h1:Gi2tvZIJyBtO9SDr1q9h5hEQCp/4L2RQ+ar0qjx2oNU=
|
||||
golang.org/x/net v0.11.0/go.mod h1:2L/ixqYpgIVXmeoSA/4Lu7BzTG4KIyPIryS4IsOd1oQ=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
|
@ -931,8 +943,8 @@ golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4Iltr
|
|||
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.7.0 h1:qe6s0zUXlPX80/dITx3440hWZ7GwMwgDDyrSGTPJG/g=
|
||||
golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4=
|
||||
golang.org/x/oauth2 v0.9.0 h1:BPpt2kU7oMRq3kCHAA1tbSEshXRw1LpG2ztgDwrzuAs=
|
||||
golang.org/x/oauth2 v0.9.0/go.mod h1:qYgFZaFiu6Wg24azG8bdV52QJXJGbZzIIsRCdVKzbLw=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
|
@ -946,8 +958,8 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ
|
|||
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
|
||||
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.2.0 h1:PUR+T4wwASmuSTYdKjYHI5TD22Wy5ogLU5qZCOLxBrI=
|
||||
golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
|
@ -1003,6 +1015,7 @@ golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7w
|
|||
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
|
@ -1015,14 +1028,14 @@ golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBc
|
|||
golang.org/x/sys v0.0.0-20221010170243-090e33056c14/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU=
|
||||
golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.9.0 h1:KS/R3tvhPqvJvwcKfnBHJwwthS11LRhmM5D59eEXa0s=
|
||||
golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
|
||||
golang.org/x/term v0.7.0 h1:BEvjmm5fURWqcfbSKTdpkDXYBrUS1c0m8agp14W48vQ=
|
||||
golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY=
|
||||
golang.org/x/term v0.9.0 h1:GRRCnKYhdQrD8kfRAdQ6Zcw1P0OcELxGLKJvtjVMZ28=
|
||||
golang.org/x/term v0.9.0/go.mod h1:M6DEAAIenWoTxdKrOltXcmDY3rSplQUkrvaDU5FcQyo=
|
||||
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
|
@ -1033,8 +1046,8 @@ golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
|||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE=
|
||||
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
|
||||
golang.org/x/text v0.10.0 h1:UpjohKhiEgNc0CSauXmwYftY1+LlaC75SJwh0SgCX58=
|
||||
golang.org/x/text v0.10.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
|
||||
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
|
@ -1094,8 +1107,8 @@ golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roY
|
|||
golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
|
||||
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
|
||||
golang.org/x/tools v0.8.0 h1:vSDcovVPld282ceKgDimkRSC8kpaH1dgyc9UMzlt84Y=
|
||||
golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4=
|
||||
golang.org/x/tools v0.9.3 h1:Gn1I8+64MsuTb/HpH+LmQtNas23LhUVr3rYZ0eKuaMM=
|
||||
golang.org/x/tools v0.9.3/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
|
@ -1157,8 +1170,8 @@ google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7Fc
|
|||
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
|
||||
google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
|
||||
google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 h1:DdoeryqhaXp1LtT/emMP1BRJPHHKFi5akj/nbx/zNTA=
|
||||
google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s=
|
||||
google.golang.org/genproto v0.0.0-20230320184635-7606e756e683 h1:khxVcsk/FhnzxMKOyD+TDGwjbEOpcPuIpmafPGFmhMA=
|
||||
google.golang.org/genproto v0.0.0-20230320184635-7606e756e683/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s=
|
||||
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM=
|
||||
|
@ -1180,8 +1193,8 @@ google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv
|
|||
google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
|
||||
google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
|
||||
google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
|
||||
google.golang.org/grpc v1.53.0 h1:LAv2ds7cmFV/XTS3XG1NneeENYrXGmorPxsBbptIjNc=
|
||||
google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw=
|
||||
google.golang.org/grpc v1.55.0 h1:3Oj82/tFSCeUrRTg/5E/7d/W5A1tj6Ky1ABAuZuv5ag=
|
||||
google.golang.org/grpc v1.55.0/go.mod h1:iYEXKGkEBhg1PjZQvoYEVPTDkHo1/bjTnfwTeGONTY8=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
||||
|
@ -1249,8 +1262,8 @@ k8s.io/apimachinery v0.26.2 h1:da1u3D5wfR5u2RpLhE/ZtZS2P7QvDgLZTi9wrNZl/tQ=
|
|||
k8s.io/apimachinery v0.26.2/go.mod h1:ats7nN1LExKHvJ9TmwootT00Yz05MuYqPXEXaVeOy5I=
|
||||
k8s.io/client-go v0.26.2 h1:s1WkVujHX3kTp4Zn4yGNFK+dlDXy1bAAkIl+cFAiuYI=
|
||||
k8s.io/client-go v0.26.2/go.mod h1:u5EjOuSyBa09yqqyY7m3abZeovO/7D/WehVVlZ2qcqU=
|
||||
k8s.io/kube-openapi v0.0.0-20230303024457-afdc3dddf62d h1:VcFq5n7wCJB2FQMCIHfC+f+jNcGgNMar1uKd6rVlifU=
|
||||
k8s.io/kube-openapi v0.0.0-20230303024457-afdc3dddf62d/go.mod h1:y5VtZWM9sHHc2ZodIH/6SHzXj+TPU5USoA8lcIeKEKY=
|
||||
k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f h1:2kWPakN3i/k81b0gvD5C5FJ2kxm1WrQFanWchyKuqGg=
|
||||
k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f/go.mod h1:byini6yhqGC14c3ebc/QwanvYwhuMWF6yz2F8uwW8eg=
|
||||
k8s.io/utils v0.0.0-20230308161112-d77c459e9343 h1:m7tbIjXGcGIAtpmQr7/NAi7RsWoW3E7Zcm4jI1HicTc=
|
||||
k8s.io/utils v0.0.0-20230308161112-d77c459e9343/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
|
||||
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
|
||||
|
|
|
@@ -118,12 +118,12 @@ func TestInverse(t *testing.T) {
            expected: &Matcher{Type: MatchEqual, Name: "name2", Value: "value2"},
        },
        {
            matcher: &Matcher{Type: MatchRegexp, Name: "name3", Value: "value3"},
            expected: &Matcher{Type: MatchNotRegexp, Name: "name3", Value: "value3"},
            matcher: &Matcher{Type: MatchRegexp, Name: "name3", Value: "value3.*"},
            expected: &Matcher{Type: MatchNotRegexp, Name: "name3", Value: "value3.*"},
        },
        {
            matcher: &Matcher{Type: MatchNotRegexp, Name: "name4", Value: "value4"},
            expected: &Matcher{Type: MatchRegexp, Name: "name4", Value: "value4"},
            matcher: &Matcher{Type: MatchNotRegexp, Name: "name4", Value: "value4.*"},
            expected: &Matcher{Type: MatchRegexp, Name: "name4", Value: "value4.*"},
        },
    }
@@ -215,3 +215,13 @@ func BenchmarkMatchType_String(b *testing.B) {
        _ = MatchType(i % int(MatchNotRegexp+1)).String()
    }
}

func BenchmarkNewMatcher(b *testing.B) {
    b.Run("regex matcher with literal", func(b *testing.B) {
        b.ReportAllocs()
        b.ResetTimer()
        for i := 0; i < b.N; i++ {
            NewMatcher(MatchRegexp, "foo", "bar")
        }
    })
}
@@ -330,8 +330,8 @@ func isCaseSensitive(reg *syntax.Regexp) bool {
}

// tooManyMatches guards against creating too many set matches
func tooManyMatches(matches []string, new ...string) bool {
    return len(matches)+len(new) > maxSetMatches
func tooManyMatches(matches []string, added ...string) bool {
    return len(matches)+len(added) > maxSetMatches
}

func (m *FastRegexMatcher) MatchString(s string) bool {
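The hunk above only renames a parameter, presumably because `new` is a predeclared Go identifier. The guard itself is small enough to show in isolation; a minimal standalone sketch, assuming the file's `maxSetMatches` cap (256 upstream, but treat the value as an assumption here):

```go
package main

import "fmt"

// maxSetMatches caps how many literal "set matches" a regexp may be
// expanded into; 256 mirrors the upstream constant (an assumption here).
const maxSetMatches = 256

// tooManyMatches reports whether appending the candidate strings would
// push the accumulated set matches past the cap, exactly as in the hunk.
func tooManyMatches(matches []string, added ...string) bool {
	return len(matches)+len(added) > maxSetMatches
}

func main() {
	matches := make([]string, 255)
	fmt.Println(tooManyMatches(matches, "a"))      // false: lands exactly on the cap
	fmt.Println(tooManyMatches(matches, "a", "b")) // true: one past the cap
}
```
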
@@ -349,19 +349,6 @@ func (n *Manager) Send(alerts ...*Alert) {
    n.mtx.Lock()
    defer n.mtx.Unlock()

    // Attach external labels before relabelling and sending.
    for _, a := range alerts {
        lb := labels.NewBuilder(a.Labels)

        n.opts.ExternalLabels.Range(func(l labels.Label) {
            if a.Labels.Get(l.Name) == "" {
                lb.Set(l.Name, l.Value)
            }
        })

        a.Labels = lb.Labels()
    }

    alerts = n.relabelAlerts(alerts)
    if len(alerts) == 0 {
        return
@@ -390,15 +377,25 @@ func (n *Manager) Send(alerts ...*Alert) {
    n.setMore()
}

// Attach external labels and process relabelling rules.
func (n *Manager) relabelAlerts(alerts []*Alert) []*Alert {
    lb := labels.NewBuilder(labels.EmptyLabels())
    var relabeledAlerts []*Alert

    for _, alert := range alerts {
        labels, keep := relabel.Process(alert.Labels, n.opts.RelabelConfigs...)
        if keep {
            alert.Labels = labels
            relabeledAlerts = append(relabeledAlerts, alert)
    for _, a := range alerts {
        lb.Reset(a.Labels)
        n.opts.ExternalLabels.Range(func(l labels.Label) {
            if a.Labels.Get(l.Name) == "" {
                lb.Set(l.Name, l.Value)
            }
        })

        keep := relabel.ProcessBuilder(lb, n.opts.RelabelConfigs...)
        if !keep {
            continue
        }
        a.Labels = lb.Labels()
        relabeledAlerts = append(relabeledAlerts, a)
    }
    return relabeledAlerts
}
@@ -701,36 +698,38 @@ func postPath(pre string, v config.AlertmanagerAPIVersion) string {
func AlertmanagerFromGroup(tg *targetgroup.Group, cfg *config.AlertmanagerConfig) ([]alertmanager, []alertmanager, error) {
    var res []alertmanager
    var droppedAlertManagers []alertmanager
    lb := labels.NewBuilder(labels.EmptyLabels())

    for _, tlset := range tg.Targets {
        lbls := make([]labels.Label, 0, len(tlset)+2+len(tg.Labels))
        lb.Reset(labels.EmptyLabels())

        for ln, lv := range tlset {
            lbls = append(lbls, labels.Label{Name: string(ln), Value: string(lv)})
            lb.Set(string(ln), string(lv))
        }
        // Set configured scheme as the initial scheme label for overwrite.
        lbls = append(lbls, labels.Label{Name: model.SchemeLabel, Value: cfg.Scheme})
        lbls = append(lbls, labels.Label{Name: pathLabel, Value: postPath(cfg.PathPrefix, cfg.APIVersion)})
        lb.Set(model.SchemeLabel, cfg.Scheme)
        lb.Set(pathLabel, postPath(cfg.PathPrefix, cfg.APIVersion))

        // Combine target labels with target group labels.
        for ln, lv := range tg.Labels {
            if _, ok := tlset[ln]; !ok {
                lbls = append(lbls, labels.Label{Name: string(ln), Value: string(lv)})
                lb.Set(string(ln), string(lv))
            }
        }

        lset, keep := relabel.Process(labels.New(lbls...), cfg.RelabelConfigs...)
        preRelabel := lb.Labels()
        keep := relabel.ProcessBuilder(lb, cfg.RelabelConfigs...)
        if !keep {
            droppedAlertManagers = append(droppedAlertManagers, alertmanagerLabels{labels.New(lbls...)})
            droppedAlertManagers = append(droppedAlertManagers, alertmanagerLabels{preRelabel})
            continue
        }

        addr := lset.Get(model.AddressLabel)
        addr := lb.Get(model.AddressLabel)
        if err := config.CheckTargetAddress(model.LabelValue(addr)); err != nil {
            return nil, nil, err
        }

        res = append(res, alertmanagerLabels{lset})
        res = append(res, alertmanagerLabels{lb.Labels()})
    }
    return res, droppedAlertManagers, nil
}
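Both notifier hunks above follow the same pattern: instead of allocating a fresh label slice (or builder) per alert or target, one labels.Builder is created outside the loop, Reset for each item, and relabeling is applied in place via relabel.ProcessBuilder. A hedged sketch of that reuse pattern; the helper name here is hypothetical, while the Builder calls (NewBuilder, Reset, Set, Get, Range, Labels) match the API as used in the diff:

```go
package notifiersketch

import "github.com/prometheus/prometheus/model/labels"

// keepAll sketches the builder-reuse pattern the hunks adopt: one Builder
// for the whole batch, Reset per item, labels materialized only for items
// that survive. processItem is a hypothetical stand-in for
// relabel.ProcessBuilder.
func keepAll(items []labels.Labels, defaults labels.Labels, processItem func(*labels.Builder) bool) []labels.Labels {
	lb := labels.NewBuilder(labels.EmptyLabels()) // single allocation for the batch
	kept := make([]labels.Labels, 0, len(items))
	for _, in := range items {
		lb.Reset(in) // reuse the builder instead of allocating per item
		// Fill defaults (e.g. external labels) only where the item
		// does not already set the label, as the Send hunk does.
		defaults.Range(func(l labels.Label) {
			if in.Get(l.Name) == "" {
				lb.Set(l.Name, l.Value)
			}
		})
		if !processItem(lb) {
			continue // dropped by relabeling
		}
		kept = append(kept, lb.Labels()) // materialize only kept items
	}
	return kept
}
```
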
@@ -408,44 +408,50 @@ func (ng *Engine) SetQueryLogger(l QueryLogger) {
}

// NewInstantQuery returns an evaluation query for the given expression at the given time.
func (ng *Engine) NewInstantQuery(_ context.Context, q storage.Queryable, opts *QueryOpts, qs string, ts time.Time) (Query, error) {
func (ng *Engine) NewInstantQuery(ctx context.Context, q storage.Queryable, opts *QueryOpts, qs string, ts time.Time) (Query, error) {
    pExpr, qry := ng.newQuery(q, qs, opts, ts, ts, 0)
    finishQueue, err := ng.queueActive(ctx, qry)
    if err != nil {
        return nil, err
    }
    defer finishQueue()
    expr, err := parser.ParseExpr(qs)
    if err != nil {
        return nil, err
    }
    qry, err := ng.newQuery(q, opts, expr, ts, ts, 0)
    if err != nil {
    if err := ng.validateOpts(expr); err != nil {
        return nil, err
    }
    qry.q = qs
    *pExpr = PreprocessExpr(expr, ts, ts)

    return qry, nil
}

// NewRangeQuery returns an evaluation query for the given time range and with
// the resolution set by the interval.
func (ng *Engine) NewRangeQuery(_ context.Context, q storage.Queryable, opts *QueryOpts, qs string, start, end time.Time, interval time.Duration) (Query, error) {
func (ng *Engine) NewRangeQuery(ctx context.Context, q storage.Queryable, opts *QueryOpts, qs string, start, end time.Time, interval time.Duration) (Query, error) {
    pExpr, qry := ng.newQuery(q, qs, opts, start, end, interval)
    finishQueue, err := ng.queueActive(ctx, qry)
    if err != nil {
        return nil, err
    }
    defer finishQueue()
    expr, err := parser.ParseExpr(qs)
    if err != nil {
        return nil, err
    }
    if err := ng.validateOpts(expr); err != nil {
        return nil, err
    }
    if expr.Type() != parser.ValueTypeVector && expr.Type() != parser.ValueTypeScalar {
        return nil, fmt.Errorf("invalid expression type %q for range query, must be Scalar or instant Vector", parser.DocumentedType(expr.Type()))
    }
    qry, err := ng.newQuery(q, opts, expr, start, end, interval)
    if err != nil {
        return nil, err
    }
    qry.q = qs
    *pExpr = PreprocessExpr(expr, start, end)

    return qry, nil
}

func (ng *Engine) newQuery(q storage.Queryable, opts *QueryOpts, expr parser.Expr, start, end time.Time, interval time.Duration) (*query, error) {
    if err := ng.validateOpts(expr); err != nil {
        return nil, err
    }

func (ng *Engine) newQuery(q storage.Queryable, qs string, opts *QueryOpts, start, end time.Time, interval time.Duration) (*parser.Expr, *query) {
    // Default to empty QueryOpts if not provided.
    if opts == nil {
        opts = &QueryOpts{}
@@ -457,20 +463,20 @@ func (ng *Engine) newQuery(q storage.Queryable, opts *QueryOpts, expr parser.Exp
    }

    es := &parser.EvalStmt{
        Expr: PreprocessExpr(expr, start, end),
        Start: start,
        End: end,
        Interval: interval,
        LookbackDelta: lookbackDelta,
    }
    qry := &query{
        q: qs,
        stmt: es,
        ng: ng,
        stats: stats.NewQueryTimers(),
        sampleStats: stats.NewQuerySamples(ng.enablePerStepStats && opts.EnablePerStepStats),
        queryable: q,
    }
    return qry, nil
    return &es.Expr, qry
}

var (
@@ -589,18 +595,11 @@ func (ng *Engine) exec(ctx context.Context, q *query) (v parser.Value, ws storag
    execSpanTimer, ctx := q.stats.GetSpanTimer(ctx, stats.ExecTotalTime)
    defer execSpanTimer.Finish()

    queueSpanTimer, _ := q.stats.GetSpanTimer(ctx, stats.ExecQueueTime, ng.metrics.queryQueueTime)
    // Log query in active log. The active log guarantees that we don't run over
    // MaxConcurrent queries.
    if ng.activeQueryTracker != nil {
        queryIndex, err := ng.activeQueryTracker.Insert(ctx, q.q)
        if err != nil {
            queueSpanTimer.Finish()
            return nil, nil, contextErr(err, "query queue")
        }
        defer ng.activeQueryTracker.Delete(queryIndex)
    finishQueue, err := ng.queueActive(ctx, q)
    if err != nil {
        return nil, nil, err
    }
    queueSpanTimer.Finish()
    defer finishQueue()

    // Cancel when execution is done or an error was raised.
    defer q.cancel()
@@ -623,6 +622,18 @@
    panic(fmt.Errorf("promql.Engine.exec: unhandled statement of type %T", q.Statement()))
}

// Log query in active log. The active log guarantees that we don't run over
// MaxConcurrent queries.
func (ng *Engine) queueActive(ctx context.Context, q *query) (func(), error) {
    if ng.activeQueryTracker == nil {
        return func() {}, nil
    }
    queueSpanTimer, _ := q.stats.GetSpanTimer(ctx, stats.ExecQueueTime, ng.metrics.queryQueueTime)
    queryIndex, err := ng.activeQueryTracker.Insert(ctx, q.q)
    queueSpanTimer.Finish()
    return func() { ng.activeQueryTracker.Delete(queryIndex) }, err
}

func timeMilliseconds(t time.Time) int64 {
    return t.UnixNano() / int64(time.Millisecond/time.Nanosecond)
}
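The refactor above centralizes the concurrency-gate bookkeeping in queueActive: it inserts the query into the active-query tracker (which blocks once MaxConcurrent queries are in flight), times the wait, and hands back a cleanup closure the caller defers. A hedged sketch of the same acquire/release shape, with a plain channel semaphore standing in for the tracker (the names and semaphore are illustrative, not the engine's types):

```go
package enginesketch

import "context"

// acquireSlot mirrors queueActive's shape: take a slot in a bounded
// semaphore and return a release func the caller defers. A nil sem means
// "no limit", mirroring the nil activeQueryTracker fast path above.
func acquireSlot(ctx context.Context, sem chan struct{}) (func(), error) {
	if sem == nil {
		return func() {}, nil
	}
	select {
	case sem <- struct{}{}: // took a slot; at most cap(sem) queries run at once
		return func() { <-sem }, nil
	case <-ctx.Done():
		return func() {}, ctx.Err()
	}
}
```

Callers then follow the pattern in the hunks: `finish, err := acquireSlot(ctx, sem); if err != nil { return err }; defer finish()`.
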
@@ -387,7 +387,7 @@ var Functions = map[string]*Function{
}

// getFunction returns a predefined Function object for the given name.
func getFunction(name string) (*Function, bool) {
    function, ok := Functions[name]
func getFunction(name string, functions map[string]*Function) (*Function, bool) {
    function, ok := functions[name]
    return function, ok
}
@@ -339,7 +339,7 @@ grouping_label : maybe_label

function_call : IDENTIFIER function_call_body
    {
        fn, exist := getFunction($1.Val)
        fn, exist := getFunction($1.Val, yylex.(*parser).functions)
        if !exist{
            yylex.(*parser).addParseErrf($1.PositionRange(),"unknown function with name %q", $1.Val)
        }
@@ -1210,7 +1210,7 @@ yydefault:
        yyDollar = yyS[yypt-2 : yypt+1]
//line promql/parser/generated_parser.y:341
        {
            fn, exist := getFunction(yyDollar[1].item.Val)
            fn, exist := getFunction(yyDollar[1].item.Val, yylex.(*parser).functions)
            if !exist {
                yylex.(*parser).addParseErrf(yyDollar[1].item.PositionRange(), "unknown function with name %q", yyDollar[1].item.Val)
            }
@@ -37,12 +37,20 @@ var parserPool = sync.Pool{
    },
}

type Parser interface {
    ParseExpr() (Expr, error)
    Close()
}

type parser struct {
    lex Lexer

    inject    ItemType
    injecting bool

    // functions contains all functions supported by the parser instance.
    functions map[string]*Function

    // Every time an Item is lexed that could be the end
    // of certain expressions its end position is stored here.
    lastClosing Pos
@@ -53,6 +61,63 @@ type parser struct {
    parseErrors ParseErrors
}

type Opt func(p *parser)

func WithFunctions(functions map[string]*Function) Opt {
    return func(p *parser) {
        p.functions = functions
    }
}

// NewParser returns a new parser.
// nolint:revive
func NewParser(input string, opts ...Opt) *parser {
    p := parserPool.Get().(*parser)

    p.functions = Functions
    p.injecting = false
    p.parseErrors = nil
    p.generatedParserResult = nil

    // Clear lexer struct before reusing.
    p.lex = Lexer{
        input: input,
        state: lexStatements,
    }

    // Apply user-defined options.
    for _, opt := range opts {
        opt(p)
    }

    return p
}

func (p *parser) ParseExpr() (expr Expr, err error) {
    defer p.recover(&err)

    parseResult := p.parseGenerated(START_EXPRESSION)

    if parseResult != nil {
        expr = parseResult.(Expr)
    }

    // Only typecheck when there are no syntax errors.
    if len(p.parseErrors) == 0 {
        p.checkAST(expr)
    }

    if len(p.parseErrors) != 0 {
        err = p.parseErrors
    }

    return expr, err
}

func (p *parser) Close() {
    defer parserPool.Put(p)
}

// ParseErr wraps a parsing error with line and position context.
type ParseErr struct {
    PositionRange PositionRange
@@ -105,32 +170,15 @@ func (errs ParseErrors) Error() string {

// ParseExpr returns the expression parsed from the input.
func ParseExpr(input string) (expr Expr, err error) {
    p := newParser(input)
    defer parserPool.Put(p)
    defer p.recover(&err)

    parseResult := p.parseGenerated(START_EXPRESSION)

    if parseResult != nil {
        expr = parseResult.(Expr)
    }

    // Only typecheck when there are no syntax errors.
    if len(p.parseErrors) == 0 {
        p.checkAST(expr)
    }

    if len(p.parseErrors) != 0 {
        err = p.parseErrors
    }

    return expr, err
    p := NewParser(input)
    defer p.Close()
    return p.ParseExpr()
}

// ParseMetric parses the input into a metric
func ParseMetric(input string) (m labels.Labels, err error) {
    p := newParser(input)
    defer parserPool.Put(p)
    p := NewParser(input)
    defer p.Close()
    defer p.recover(&err)

    parseResult := p.parseGenerated(START_METRIC)
@@ -148,8 +196,8 @@ func ParseMetric(input string) (m labels.Labels, err error) {
// ParseMetricSelector parses the provided textual metric selector into a list of
// label matchers.
func ParseMetricSelector(input string) (m []*labels.Matcher, err error) {
    p := newParser(input)
    defer parserPool.Put(p)
    p := NewParser(input)
    defer p.Close()
    defer p.recover(&err)

    parseResult := p.parseGenerated(START_METRIC_SELECTOR)
@@ -164,22 +212,6 @@ func ParseMetricSelector(input string) (m []*labels.Matcher, err error) {
    return m, err
}

// newParser returns a new parser.
func newParser(input string) *parser {
    p := parserPool.Get().(*parser)

    p.injecting = false
    p.parseErrors = nil
    p.generatedParserResult = nil

    // Clear lexer struct before reusing.
    p.lex = Lexer{
        input: input,
        state: lexStatements,
    }
    return p
}

// SequenceValue is an omittable value in a sequence of time series values.
type SequenceValue struct {
    Value float64
@@ -200,10 +232,10 @@ type seriesDescription struct {

// ParseSeriesDesc parses the description of a time series.
func ParseSeriesDesc(input string) (labels labels.Labels, values []SequenceValue, err error) {
    p := newParser(input)
    p := NewParser(input)
    p.lex.seriesDesc = true

    defer parserPool.Put(p)
    defer p.Close()
    defer p.recover(&err)

    parseResult := p.parseGenerated(START_SERIES_DESCRIPTION)
@@ -799,7 +831,7 @@ func MustLabelMatcher(mt labels.MatchType, name, val string) *labels.Matcher {
}

func MustGetFunction(name string) *Function {
    f, ok := getFunction(name)
    f, ok := getFunction(name, Functions)
    if !ok {
        panic(fmt.Errorf("function %q does not exist", name))
    }
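With the pool now behind an exported constructor, the lifecycle is: NewParser checks a parser out of the pool (optionally overriding its function table via WithFunctions), ParseExpr runs it, and Close must be called to return it to the pool. A hedged usage sketch; NewParser, WithFunctions, ParseExpr, Close, Function, the ValueType constants, and the Functions table all appear in the diff, while the custom function entry itself is illustrative (the test after this hunk exercises the same path):

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/promql/parser"
)

func main() {
	// Copy the default table before extending it, so the package-global
	// Functions map is not mutated.
	funcs := make(map[string]*parser.Function, len(parser.Functions)+1)
	for name, f := range parser.Functions {
		funcs[name] = f
	}
	funcs["my_func"] = &parser.Function{ // illustrative custom function
		Name:       "my_func",
		ArgTypes:   []parser.ValueType{parser.ValueTypeMatrix},
		ReturnType: parser.ValueTypeVector,
	}

	p := parser.NewParser("my_func(metric[5m])", parser.WithFunctions(funcs))
	defer p.Close() // returns the parser to the pool; always pair with NewParser

	expr, err := p.ParseExpr()
	if err != nil {
		panic(err)
	}
	fmt.Printf("parsed %T\n", expr)
}
```
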
@@ -3739,7 +3739,7 @@ func TestParseSeries(t *testing.T) {
}

func TestRecoverParserRuntime(t *testing.T) {
    p := newParser("foo bar")
    p := NewParser("foo bar")
    var err error

    defer func() {
@@ -3753,7 +3753,7 @@ func TestRecoverParserRuntime(t *testing.T) {
}

func TestRecoverParserError(t *testing.T) {
    p := newParser("foo bar")
    p := NewParser("foo bar")
    var err error

    e := errors.New("custom error")
@@ -3801,3 +3801,20 @@ func TestExtractSelectors(t *testing.T) {
        require.Equal(t, expected, ExtractSelectors(expr))
    }
}

func TestParseCustomFunctions(t *testing.T) {
    funcs := Functions
    funcs["custom_func"] = &Function{
        Name:       "custom_func",
        ArgTypes:   []ValueType{ValueTypeMatrix},
        ReturnType: ValueTypeVector,
    }
    input := "custom_func(metric[1m])"
    p := NewParser(input, WithFunctions(funcs))
    expr, err := p.ParseExpr()
    require.NoError(t, err)

    call, ok := expr.(*Call)
    require.True(t, ok)
    require.Equal(t, "custom_func", call.Func.Name)
}
@@ -150,7 +150,7 @@ type Manager struct {
    append    storage.Appendable
    graceShut chan struct{}

    jitterSeed uint64 // Global jitterSeed seed is used to spread scrape workload across HA setup.
    offsetSeed uint64 // Global offsetSeed is used to spread scrape workload across HA setup.
    mtxScrape     sync.Mutex // Guards the fields below.
    scrapeConfigs map[string]*config.ScrapeConfig
    scrapePools   map[string]*scrapePool
@@ -214,7 +214,7 @@ func (m *Manager) reload() {
            level.Error(m.logger).Log("msg", "error reloading target set", "err", "invalid config id:"+setName)
            continue
        }
        sp, err := newScrapePool(scrapeConfig, m.append, m.jitterSeed, log.With(m.logger, "scrape_pool", setName), m.opts)
        sp, err := newScrapePool(scrapeConfig, m.append, m.offsetSeed, log.With(m.logger, "scrape_pool", setName), m.opts)
        if err != nil {
            level.Error(m.logger).Log("msg", "error creating new scrape pool", "err", err, "scrape_pool", setName)
            continue
@@ -234,8 +234,8 @@ func (m *Manager) reload() {
    wg.Wait()
}

// setJitterSeed calculates a global jitterSeed per server relying on extra label set.
func (m *Manager) setJitterSeed(labels labels.Labels) error {
// setOffsetSeed calculates a global offsetSeed per server relying on extra label set.
func (m *Manager) setOffsetSeed(labels labels.Labels) error {
    h := fnv.New64a()
    hostname, err := osutil.GetFQDN()
    if err != nil {
@@ -244,7 +244,7 @@ func (m *Manager) setJitterSeed(labels labels.Labels) error {
    if _, err := fmt.Fprintf(h, "%s%s", hostname, labels.String()); err != nil {
        return err
    }
    m.jitterSeed = h.Sum64()
    m.offsetSeed = h.Sum64()
    return nil
}

@@ -281,7 +281,7 @@ func (m *Manager) ApplyConfig(cfg *config.Config) error {
    }
    m.scrapeConfigs = c

    if err := m.setJitterSeed(cfg.GlobalConfig.ExternalLabels); err != nil {
    if err := m.setOffsetSeed(cfg.GlobalConfig.ExternalLabels); err != nil {
        return err
    }
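As the hunks above show, setOffsetSeed derives the per-server seed by hashing the host's FQDN concatenated with the external-label string through FNV-1a, so two HA replicas with different hostnames or labels get different seeds. A compact, self-contained sketch of that derivation; os.Hostname stands in for the repo's osutil.GetFQDN, and the labels are passed as a plain string:

```go
package main

import (
	"fmt"
	"hash/fnv"
	"os"
)

// offsetSeed reproduces the shape of Manager.setOffsetSeed: FNV-1a over
// hostname + external labels. os.Hostname is a stand-in for osutil.GetFQDN.
func offsetSeed(externalLabels string) (uint64, error) {
	hostname, err := os.Hostname()
	if err != nil {
		return 0, err
	}
	h := fnv.New64a()
	if _, err := fmt.Fprintf(h, "%s%s", hostname, externalLabels); err != nil {
		return 0, err
	}
	return h.Sum64(), nil
}

func main() {
	seed, err := offsetSeed(`{prometheus="ha1"}`)
	if err != nil {
		panic(err)
	}
	fmt.Printf("offset seed: %#x\n", seed)
}
```
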
@@ -596,7 +596,7 @@ func TestManagerTargetsUpdates(t *testing.T) {
    }
}

func TestSetJitter(t *testing.T) {
func TestSetOffsetSeed(t *testing.T) {
    getConfig := func(prometheus string) *config.Config {
        cfgText := `
global:
@@ -617,24 +617,24 @@ global:

    // Load the first config.
    cfg1 := getConfig("ha1")
    if err := scrapeManager.setJitterSeed(cfg1.GlobalConfig.ExternalLabels); err != nil {
    if err := scrapeManager.setOffsetSeed(cfg1.GlobalConfig.ExternalLabels); err != nil {
        t.Error(err)
    }
    jitter1 := scrapeManager.jitterSeed
    offsetSeed1 := scrapeManager.offsetSeed

    if jitter1 == 0 {
        t.Error("Jitter has to be a hash of uint64")
    if offsetSeed1 == 0 {
        t.Error("Offset seed has to be a hash of uint64")
    }

    // Load the second config.
    cfg2 := getConfig("ha2")
    if err := scrapeManager.setJitterSeed(cfg2.GlobalConfig.ExternalLabels); err != nil {
    if err := scrapeManager.setOffsetSeed(cfg2.GlobalConfig.ExternalLabels); err != nil {
        t.Error(err)
    }
    jitter2 := scrapeManager.jitterSeed
    offsetSeed2 := scrapeManager.offsetSeed

    if jitter1 == jitter2 {
        t.Error("Jitter should not be the same on different set of external labels")
    if offsetSeed1 == offsetSeed2 {
        t.Error("Offset seed should not be the same on different set of external labels")
    }
}
@@ -279,7 +279,7 @@ const maxAheadTime = 10 * time.Minute
// returning an empty label set is interpreted as "drop"
type labelsMutator func(labels.Labels) labels.Labels

func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, jitterSeed uint64, logger log.Logger, options *Options) (*scrapePool, error) {
func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, offsetSeed uint64, logger log.Logger, options *Options) (*scrapePool, error) {
    targetScrapePools.Inc()
    if logger == nil {
        logger = log.NewNopLogger()
@@ -325,7 +325,7 @@ func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, jitterSeed
            func(l labels.Labels) labels.Labels { return mutateReportSampleLabels(l, opts.target) },
            func(ctx context.Context) storage.Appender { return app.Appender(ctx) },
            cache,
            jitterSeed,
            offsetSeed,
            opts.honorTimestamps,
            opts.sampleLimit,
            opts.bucketLimit,
@@ -775,7 +775,7 @@ func appender(app storage.Appender, sampleLimit, bucketLimit int) storage.Append
type scraper interface {
    scrape(ctx context.Context, w io.Writer) (string, error)
    Report(start time.Time, dur time.Duration, err error)
    offset(interval time.Duration, jitterSeed uint64) time.Duration
    offset(interval time.Duration, offsetSeed uint64) time.Duration
}

// targetScraper implements the scraper interface for a target.
@@ -891,7 +891,7 @@ type scrapeLoop struct {
    cache          *scrapeCache
    lastScrapeSize int
    buffers        *pool.Pool
    jitterSeed     uint64
    offsetSeed     uint64
    honorTimestamps bool
    forcedErr       error
    forcedErrMtx    sync.Mutex
@@ -1175,7 +1175,7 @@ func newScrapeLoop(ctx context.Context,
    reportSampleMutator labelsMutator,
    appender func(ctx context.Context) storage.Appender,
    cache *scrapeCache,
    jitterSeed uint64,
    offsetSeed uint64,
    honorTimestamps bool,
    sampleLimit int,
    bucketLimit int,
@@ -1217,7 +1217,7 @@ func newScrapeLoop(ctx context.Context,
        sampleMutator:       sampleMutator,
        reportSampleMutator: reportSampleMutator,
        stopped:             make(chan struct{}),
        jitterSeed:          jitterSeed,
        offsetSeed:          offsetSeed,
        l:                   l,
        parentCtx:           ctx,
        appenderCtx:         appenderCtx,
@@ -1238,7 +1238,7 @@ func newScrapeLoop(ctx context.Context,

func (sl *scrapeLoop) run(errc chan<- error) {
    select {
    case <-time.After(sl.scraper.offset(sl.interval, sl.jitterSeed)):
    case <-time.After(sl.scraper.offset(sl.interval, sl.offsetSeed)):
        // Continue after a scraping offset.
    case <-sl.ctx.Done():
        close(sl.stopped)
@ -154,14 +154,14 @@ func (t *Target) hash() uint64 {
|
|||
}
|
||||
|
||||
// offset returns the time until the next scrape cycle for the target.
|
||||
// It includes the global server jitterSeed for scrapes from multiple Prometheus to try to be at different times.
|
||||
func (t *Target) offset(interval time.Duration, jitterSeed uint64) time.Duration {
|
||||
// It includes the global server offsetSeed so that scrapes from multiple Prometheus servers land at different times.
|
||||
func (t *Target) offset(interval time.Duration, offsetSeed uint64) time.Duration {
|
||||
now := time.Now().UnixNano()
|
||||
|
||||
// Base is pinned to absolute time, no matter how often offset is called.
|
||||
var (
|
||||
base = int64(interval) - now%int64(interval)
|
||||
offset = (t.hash() ^ jitterSeed) % uint64(interval)
|
||||
offset = (t.hash() ^ offsetSeed) % uint64(interval)
|
||||
next = base + int64(offset)
|
||||
)
|
||||
|
||||
|
|
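The offset arithmetic used by the scrape loop is compact enough to trace in isolation. A runnable sketch of the same computation as in Target.offset above (targetHash and offsetSeed are arbitrary example inputs):

package main

import (
	"fmt"
	"time"
)

// offset mirrors the computation above: the next scrape is aligned to the
// interval boundary ("base"), then shifted by a per-target amount derived
// from the target hash XOR the server's offset seed, wrapping within the
// interval so the result stays below one full interval.
func offset(now time.Time, interval time.Duration, targetHash, offsetSeed uint64) time.Duration {
	n := now.UnixNano()
	base := int64(interval) - n%int64(interval)
	off := (targetHash ^ offsetSeed) % uint64(interval)
	next := base + int64(off)
	if next > int64(interval) {
		next -= int64(interval)
	}
	return time.Duration(next)
}

func main() {
	fmt.Println(offset(time.Now(), 10*time.Second, 0xdeadbeef, 42))
}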
|
@ -59,7 +59,7 @@ func TestTargetLabels(t *testing.T) {
|
|||
|
||||
func TestTargetOffset(t *testing.T) {
|
||||
interval := 10 * time.Second
|
||||
jitter := uint64(0)
|
||||
offsetSeed := uint64(0)
|
||||
|
||||
offsets := make([]time.Duration, 10000)
|
||||
|
||||
|
@ -68,7 +68,7 @@ func TestTargetOffset(t *testing.T) {
|
|||
target := newTestTarget("example.com:80", 0, labels.FromStrings(
|
||||
"label", fmt.Sprintf("%d", i),
|
||||
))
|
||||
offsets[i] = target.offset(interval, jitter)
|
||||
offsets[i] = target.offset(interval, offsetSeed)
|
||||
}
|
||||
|
||||
// Put the offsets into buckets and validate that they are all
|
||||
|
|
32
scripts/golangci-lint.yml
Normal file
|
@ -0,0 +1,32 @@
|
|||
---
|
||||
# This action is synced from https://github.com/prometheus/prometheus
|
||||
name: golangci-lint
|
||||
on:
|
||||
push:
|
||||
paths:
|
||||
- "go.sum"
|
||||
- "go.mod"
|
||||
- "**.go"
|
||||
- "scripts/errcheck_excludes.txt"
|
||||
- ".github/workflows/golangci-lint.yml"
|
||||
- ".golangci.yml"
|
||||
pull_request:
|
||||
|
||||
jobs:
|
||||
golangci:
|
||||
name: lint
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v3
|
||||
- name: install Go
|
||||
uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: 1.20.x
|
||||
- name: Install snmp_exporter/generator dependencies
|
||||
run: sudo apt-get update && sudo apt-get -y install libsnmp-dev
|
||||
if: github.repository == 'prometheus/snmp_exporter'
|
||||
- name: Lint
|
||||
uses: golangci/golangci-lint-action@v3.4.0
|
||||
with:
|
||||
version: v1.53.3
|
|
@ -37,7 +37,7 @@ if [ -z "${GITHUB_TOKEN}" ]; then
|
|||
fi
|
||||
|
||||
# List of files that should be synced.
|
||||
SYNC_FILES="CODE_OF_CONDUCT.md LICENSE Makefile.common SECURITY.md .yamllint .github/workflows/golangci-lint.yml"
|
||||
SYNC_FILES="CODE_OF_CONDUCT.md LICENSE Makefile.common SECURITY.md .yamllint scripts/golangci-lint.yml"
|
||||
|
||||
# Go to the root of the repo
|
||||
cd "$(git rev-parse --show-cdup)" || exit 1
|
||||
|
@ -115,20 +115,23 @@ process_repo() {
|
|||
local needs_update=()
|
||||
for source_file in ${SYNC_FILES}; do
|
||||
source_checksum="$(sha256sum "${source_dir}/${source_file}" | cut -d' ' -f1)"
|
||||
|
||||
target_file="$(curl -sL --fail "https://raw.githubusercontent.com/${org_repo}/${default_branch}/${source_file}")"
|
||||
if [[ "${source_file}" == 'scripts/golangci-lint.yml' ]] && ! check_go "${org_repo}" "${default_branch}" ; then
|
||||
echo "${org_repo} is not Go, skipping golangci-lint.yml."
|
||||
continue
|
||||
fi
|
||||
if [[ "${source_file}" == 'LICENSE' ]] && ! check_license "${target_file}" ; then
|
||||
echo "LICENSE in ${org_repo} is not apache, skipping."
|
||||
continue
|
||||
fi
|
||||
if [[ "${source_file}" == '.github/workflows/golangci-lint.yml' ]] && ! check_go "${org_repo}" "${default_branch}" ; then
|
||||
echo "${org_repo} is not Go, skipping .github/workflows/golangci-lint.yml."
|
||||
continue
|
||||
target_filename="${source_file}"
|
||||
if [[ "${source_file}" == 'scripts/golangci-lint.yml' ]] ; then
|
||||
target_filename=".github/workflows/${source_file}"
|
||||
fi
|
||||
target_file="$(curl -sL --fail "https://raw.githubusercontent.com/${org_repo}/${default_branch}/${target_filename}")"
|
||||
if [[ -z "${target_file}" ]]; then
|
||||
echo "${source_file} doesn't exist in ${org_repo}"
|
||||
case "${source_file}" in
|
||||
CODE_OF_CONDUCT.md | SECURITY.md | .github/workflows/golangci-lint.yml)
|
||||
CODE_OF_CONDUCT.md | SECURITY.md)
|
||||
echo "${source_file} missing in ${org_repo}, force updating."
|
||||
needs_update+=("${source_file}")
|
||||
;;
|
||||
|
@ -159,8 +162,12 @@ process_repo() {
|
|||
|
||||
# Update the files in target repo by one from prometheus/prometheus.
|
||||
for source_file in "${needs_update[@]}"; do
|
||||
target_filename="${source_file}"
|
||||
if [[ "${source_file}" == 'scripts/golangci-lint.yml' ]] ; then
|
||||
target_filename=".github/workflows/${source_file}"
|
||||
fi
|
||||
case "${source_file}" in
|
||||
*) cp -f "${source_dir}/${source_file}" "./${source_file}" ;;
|
||||
*) cp -f "${source_dir}/${source_file}" "./${target_filename}" ;;
|
||||
esac
|
||||
done
|
||||
|
||||
|
|
8
storage/remote/azuread/README.md
Normal file
|
@ -0,0 +1,8 @@
|
|||
azuread package
|
||||
=========================================
|
||||
|
||||
azuread provides an http.RoundTripper that attaches an Azure AD accessToken
|
||||
to remote write requests.
|
||||
|
||||
This module is considered internal to Prometheus, without any stability
|
||||
guarantees for external usage.
|
247
storage/remote/azuread/azuread.go
Normal file
|
@ -0,0 +1,247 @@
|
|||
// Copyright 2023 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package azuread
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
|
||||
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
|
||||
"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
|
||||
"github.com/google/uuid"
|
||||
)
|
||||
|
||||
const (
|
||||
// Clouds.
|
||||
AzureChina = "AzureChina"
|
||||
AzureGovernment = "AzureGovernment"
|
||||
AzurePublic = "AzurePublic"
|
||||
|
||||
// Audiences.
|
||||
IngestionChinaAudience = "https://monitor.azure.cn//.default"
|
||||
IngestionGovernmentAudience = "https://monitor.azure.us//.default"
|
||||
IngestionPublicAudience = "https://monitor.azure.com//.default"
|
||||
)
|
||||
|
||||
// ManagedIdentityConfig is used to store managed identity config values.
|
||||
type ManagedIdentityConfig struct {
|
||||
// ClientID is the clientId of the managed identity that is being used to authenticate.
|
||||
ClientID string `yaml:"client_id,omitempty"`
|
||||
}
|
||||
|
||||
// AzureADConfig is used to store the config values.
|
||||
type AzureADConfig struct { // nolint:revive
|
||||
// ManagedIdentity is the managed identity that is being used to authenticate.
|
||||
ManagedIdentity *ManagedIdentityConfig `yaml:"managed_identity,omitempty"`
|
||||
|
||||
// Cloud is the Azure cloud in which the service is running. Example: AzurePublic/AzureGovernment/AzureChina.
|
||||
Cloud string `yaml:"cloud,omitempty"`
|
||||
}
|
||||
|
||||
// azureADRoundTripper is used to store the round tripper and the token provider.
|
||||
type azureADRoundTripper struct {
|
||||
next http.RoundTripper
|
||||
tokenProvider *tokenProvider
|
||||
}
|
||||
|
||||
// tokenProvider is used to store and retrieve Azure AD accessToken.
|
||||
type tokenProvider struct {
|
||||
// token is the member used to store the current valid accessToken.
|
||||
token string
|
||||
// mu guards access to token.
|
||||
mu sync.Mutex
|
||||
// refreshTime is used to store the refresh time of the current valid accessToken.
|
||||
refreshTime time.Time
|
||||
// credentialClient is the Azure AD credential client that is being used to retrieve accessToken.
|
||||
credentialClient azcore.TokenCredential
|
||||
options *policy.TokenRequestOptions
|
||||
}
|
||||
|
||||
// Validate validates config values provided.
|
||||
func (c *AzureADConfig) Validate() error {
|
||||
if c.Cloud == "" {
|
||||
c.Cloud = AzurePublic
|
||||
}
|
||||
|
||||
if c.Cloud != AzureChina && c.Cloud != AzureGovernment && c.Cloud != AzurePublic {
|
||||
return fmt.Errorf("must provide a cloud in the Azure AD config")
|
||||
}
|
||||
|
||||
if c.ManagedIdentity == nil {
|
||||
return fmt.Errorf("must provide an Azure Managed Identity in the Azure AD config")
|
||||
}
|
||||
|
||||
if c.ManagedIdentity.ClientID == "" {
|
||||
return fmt.Errorf("must provide an Azure Managed Identity client_id in the Azure AD config")
|
||||
}
|
||||
|
||||
_, err := uuid.Parse(c.ManagedIdentity.ClientID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("the provided Azure Managed Identity client_id provided is invalid")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// UnmarshalYAML unmarshals the Azure AD config yaml.
|
||||
func (c *AzureADConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
||||
type plain AzureADConfig
|
||||
*c = AzureADConfig{}
|
||||
if err := unmarshal((*plain)(c)); err != nil {
|
||||
return err
|
||||
}
|
||||
return c.Validate()
|
||||
}
|
||||
|
||||
// NewAzureADRoundTripper creates round tripper adding Azure AD authorization to calls.
|
||||
func NewAzureADRoundTripper(cfg *AzureADConfig, next http.RoundTripper) (http.RoundTripper, error) {
|
||||
if next == nil {
|
||||
next = http.DefaultTransport
|
||||
}
|
||||
|
||||
cred, err := newTokenCredential(cfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
tokenProvider, err := newTokenProvider(cfg, cred)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
rt := &azureADRoundTripper{
|
||||
next: next,
|
||||
tokenProvider: tokenProvider,
|
||||
}
|
||||
return rt, nil
|
||||
}
|
||||
|
||||
// RoundTrip sets Authorization header for requests.
|
||||
func (rt *azureADRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
|
||||
accessToken, err := rt.tokenProvider.getAccessToken(req.Context())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
bearerAccessToken := "Bearer " + accessToken
|
||||
req.Header.Set("Authorization", bearerAccessToken)
|
||||
|
||||
return rt.next.RoundTrip(req)
|
||||
}
|
||||
|
||||
// newTokenCredential returns a TokenCredential of different kinds like Azure Managed Identity and Azure AD application.
|
||||
func newTokenCredential(cfg *AzureADConfig) (azcore.TokenCredential, error) {
|
||||
cred, err := newManagedIdentityTokenCredential(cfg.ManagedIdentity.ClientID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return cred, nil
|
||||
}
|
||||
|
||||
// newManagedIdentityTokenCredential returns new Managed Identity token credential.
|
||||
func newManagedIdentityTokenCredential(managedIdentityClientID string) (azcore.TokenCredential, error) {
|
||||
clientID := azidentity.ClientID(managedIdentityClientID)
|
||||
opts := &azidentity.ManagedIdentityCredentialOptions{ID: clientID}
|
||||
return azidentity.NewManagedIdentityCredential(opts)
|
||||
}
|
||||
|
||||
// newTokenProvider helps to fetch accessToken for different types of credential. This also takes care of
|
||||
// refreshing the accessToken before expiry. This accessToken is attached to the Authorization header while making requests.
|
||||
func newTokenProvider(cfg *AzureADConfig, cred azcore.TokenCredential) (*tokenProvider, error) {
|
||||
audience, err := getAudience(cfg.Cloud)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
tokenProvider := &tokenProvider{
|
||||
credentialClient: cred,
|
||||
options: &policy.TokenRequestOptions{Scopes: []string{audience}},
|
||||
}
|
||||
|
||||
return tokenProvider, nil
|
||||
}
|
||||
|
||||
// getAccessToken returns the current valid accessToken.
|
||||
func (tokenProvider *tokenProvider) getAccessToken(ctx context.Context) (string, error) {
|
||||
tokenProvider.mu.Lock()
|
||||
defer tokenProvider.mu.Unlock()
|
||||
if tokenProvider.valid() {
|
||||
return tokenProvider.token, nil
|
||||
}
|
||||
err := tokenProvider.getToken(ctx)
|
||||
if err != nil {
|
||||
return "", errors.New("Failed to get access token: " + err.Error())
|
||||
}
|
||||
return tokenProvider.token, nil
|
||||
}
|
||||
|
||||
// valid checks if the token in the token provider is valid and not expired.
|
||||
func (tokenProvider *tokenProvider) valid() bool {
|
||||
if len(tokenProvider.token) == 0 {
|
||||
return false
|
||||
}
|
||||
if tokenProvider.refreshTime.After(time.Now().UTC()) {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// getToken retrieves a new accessToken and stores the newly retrieved token in the tokenProvider.
|
||||
func (tokenProvider *tokenProvider) getToken(ctx context.Context) error {
|
||||
accessToken, err := tokenProvider.credentialClient.GetToken(ctx, *tokenProvider.options)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(accessToken.Token) == 0 {
|
||||
return errors.New("access token is empty")
|
||||
}
|
||||
|
||||
tokenProvider.token = accessToken.Token
|
||||
err = tokenProvider.updateRefreshTime(accessToken)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// updateRefreshTime handles logic to set refreshTime. The refreshTime is set at half the duration of the actual token expiry.
|
||||
func (tokenProvider *tokenProvider) updateRefreshTime(accessToken azcore.AccessToken) error {
|
||||
tokenExpiryTimestamp := accessToken.ExpiresOn.UTC()
|
||||
deltaExpirytime := time.Now().Add(time.Until(tokenExpiryTimestamp) / 2)
|
||||
if deltaExpirytime.After(time.Now().UTC()) {
|
||||
tokenProvider.refreshTime = deltaExpirytime
|
||||
} else {
|
||||
return errors.New("access token expiry is less than the current time")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// getAudience returns audiences for different clouds.
|
||||
func getAudience(cloud string) (string, error) {
|
||||
switch strings.ToLower(cloud) {
|
||||
case strings.ToLower(AzureChina):
|
||||
return IngestionChinaAudience, nil
|
||||
case strings.ToLower(AzureGovernment):
|
||||
return IngestionGovernmentAudience, nil
|
||||
case strings.ToLower(AzurePublic):
|
||||
return IngestionPublicAudience, nil
|
||||
default:
|
||||
return "", errors.New("Cloud is not specified or is incorrect: " + cloud)
|
||||
}
|
||||
}
|
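A minimal usage sketch built only from the exported API above; the client ID is a placeholder UUID and the wrapped transport is the default one:

package main

import (
	"log"
	"net/http"

	"github.com/prometheus/prometheus/storage/remote/azuread"
)

func main() {
	cfg := &azuread.AzureADConfig{
		Cloud: azuread.AzurePublic,
		ManagedIdentity: &azuread.ManagedIdentityConfig{
			// Placeholder; must be a valid UUID to pass Validate().
			ClientID: "00000000-0000-0000-0000-000000000000",
		},
	}
	if err := cfg.Validate(); err != nil {
		log.Fatal(err)
	}
	rt, err := azuread.NewAzureADRoundTripper(cfg, http.DefaultTransport)
	if err != nil {
		log.Fatal(err)
	}
	client := &http.Client{Transport: rt}
	_ = client // Every request now carries "Authorization: Bearer <token>".
}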
252
storage/remote/azuread/azuread_test.go
Normal file
|
@ -0,0 +1,252 @@
|
|||
// Copyright 2023 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package azuread
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
"os"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
|
||||
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
|
||||
"github.com/google/uuid"
|
||||
"github.com/prometheus/client_golang/prometheus/promhttp"
|
||||
"github.com/stretchr/testify/mock"
|
||||
"github.com/stretchr/testify/suite"
|
||||
"gopkg.in/yaml.v2"
|
||||
)
|
||||
|
||||
const (
|
||||
dummyAudience = "dummyAudience"
|
||||
dummyClientID = "00000000-0000-0000-0000-000000000000"
|
||||
testTokenString = "testTokenString"
|
||||
)
|
||||
|
||||
var testTokenExpiry = time.Now().Add(10 * time.Second)
|
||||
|
||||
type AzureAdTestSuite struct {
|
||||
suite.Suite
|
||||
mockCredential *mockCredential
|
||||
}
|
||||
|
||||
type TokenProviderTestSuite struct {
|
||||
suite.Suite
|
||||
mockCredential *mockCredential
|
||||
}
|
||||
|
||||
// mockCredential mocks azidentity TokenCredential interface.
|
||||
type mockCredential struct {
|
||||
mock.Mock
|
||||
}
|
||||
|
||||
func (ad *AzureAdTestSuite) BeforeTest(_, _ string) {
|
||||
ad.mockCredential = new(mockCredential)
|
||||
}
|
||||
|
||||
func TestAzureAd(t *testing.T) {
|
||||
suite.Run(t, new(AzureAdTestSuite))
|
||||
}
|
||||
|
||||
func (ad *AzureAdTestSuite) TestAzureAdRoundTripper() {
|
||||
var gotReq *http.Request
|
||||
|
||||
testToken := &azcore.AccessToken{
|
||||
Token: testTokenString,
|
||||
ExpiresOn: testTokenExpiry,
|
||||
}
|
||||
|
||||
managedIdentityConfig := &ManagedIdentityConfig{
|
||||
ClientID: dummyClientID,
|
||||
}
|
||||
|
||||
azureAdConfig := &AzureADConfig{
|
||||
Cloud: "AzurePublic",
|
||||
ManagedIdentity: managedIdentityConfig,
|
||||
}
|
||||
|
||||
ad.mockCredential.On("GetToken", mock.Anything, mock.Anything).Return(*testToken, nil)
|
||||
|
||||
tokenProvider, err := newTokenProvider(azureAdConfig, ad.mockCredential)
|
||||
ad.Assert().NoError(err)
|
||||
|
||||
rt := &azureADRoundTripper{
|
||||
next: promhttp.RoundTripperFunc(func(req *http.Request) (*http.Response, error) {
|
||||
gotReq = req
|
||||
return &http.Response{StatusCode: http.StatusOK}, nil
|
||||
}),
|
||||
tokenProvider: tokenProvider,
|
||||
}
|
||||
|
||||
cli := &http.Client{Transport: rt}
|
||||
|
||||
req, err := http.NewRequest(http.MethodPost, "https://example.com", strings.NewReader("Hello, world!"))
|
||||
ad.Assert().NoError(err)
|
||||
|
||||
_, err = cli.Do(req)
|
||||
ad.Assert().NoError(err)
|
||||
ad.Assert().NotNil(gotReq)
|
||||
|
||||
origReq := gotReq
|
||||
ad.Assert().NotEmpty(origReq.Header.Get("Authorization"))
|
||||
ad.Assert().Equal("Bearer "+testTokenString, origReq.Header.Get("Authorization"))
|
||||
}
|
||||
|
||||
func loadAzureAdConfig(filename string) (*AzureADConfig, error) {
|
||||
content, err := os.ReadFile(filename)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cfg := AzureADConfig{}
|
||||
if err = yaml.UnmarshalStrict(content, &cfg); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &cfg, nil
|
||||
}
|
||||
|
||||
func testGoodConfig(t *testing.T, filename string) {
|
||||
_, err := loadAzureAdConfig(filename)
|
||||
if err != nil {
|
||||
t.Fatalf("Unexpected error parsing %s: %s", filename, err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGoodAzureAdConfig(t *testing.T) {
|
||||
filename := "testdata/azuread_good.yaml"
|
||||
testGoodConfig(t, filename)
|
||||
}
|
||||
|
||||
func TestGoodCloudMissingAzureAdConfig(t *testing.T) {
|
||||
filename := "testdata/azuread_good_cloudmissing.yaml"
|
||||
testGoodConfig(t, filename)
|
||||
}
|
||||
|
||||
func TestBadClientIdMissingAzureAdConfig(t *testing.T) {
|
||||
filename := "testdata/azuread_bad_clientidmissing.yaml"
|
||||
_, err := loadAzureAdConfig(filename)
|
||||
if err == nil {
|
||||
t.Fatalf("Did not receive expected error unmarshaling bad azuread config")
|
||||
}
|
||||
if !strings.Contains(err.Error(), "must provide an Azure Managed Identity in the Azure AD config") {
|
||||
t.Errorf("Received unexpected error from unmarshal of %s: %s", filename, err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
func TestBadInvalidClientIdAzureAdConfig(t *testing.T) {
|
||||
filename := "testdata/azuread_bad_invalidclientid.yaml"
|
||||
_, err := loadAzureAdConfig(filename)
|
||||
if err == nil {
|
||||
t.Fatalf("Did not receive expected error unmarshaling bad azuread config")
|
||||
}
|
||||
if !strings.Contains(err.Error(), "the provided Azure Managed Identity client_id provided is invalid") {
|
||||
t.Errorf("Received unexpected error from unmarshal of %s: %s", filename, err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
func (m *mockCredential) GetToken(ctx context.Context, options policy.TokenRequestOptions) (azcore.AccessToken, error) {
|
||||
args := m.MethodCalled("GetToken", ctx, options)
|
||||
if args.Get(0) == nil {
|
||||
return azcore.AccessToken{}, args.Error(1)
|
||||
}
|
||||
|
||||
return args.Get(0).(azcore.AccessToken), nil
|
||||
}
|
||||
|
||||
func (s *TokenProviderTestSuite) BeforeTest(_, _ string) {
|
||||
s.mockCredential = new(mockCredential)
|
||||
}
|
||||
|
||||
func TestTokenProvider(t *testing.T) {
|
||||
suite.Run(t, new(TokenProviderTestSuite))
|
||||
}
|
||||
|
||||
func (s *TokenProviderTestSuite) TestNewTokenProvider_NilAudience_Fail() {
|
||||
managedIdentityConfig := &ManagedIdentityConfig{
|
||||
ClientID: dummyClientID,
|
||||
}
|
||||
|
||||
azureAdConfig := &AzureADConfig{
|
||||
Cloud: "PublicAzure",
|
||||
ManagedIdentity: managedIdentityConfig,
|
||||
}
|
||||
|
||||
actualTokenProvider, actualErr := newTokenProvider(azureAdConfig, s.mockCredential)
|
||||
|
||||
s.Assert().Nil(actualTokenProvider)
|
||||
s.Assert().NotNil(actualErr)
|
||||
s.Assert().Equal("Cloud is not specified or is incorrect: "+azureAdConfig.Cloud, actualErr.Error())
|
||||
}
|
||||
|
||||
func (s *TokenProviderTestSuite) TestNewTokenProvider_Success() {
|
||||
managedIdentityConfig := &ManagedIdentityConfig{
|
||||
ClientID: dummyClientID,
|
||||
}
|
||||
|
||||
azureAdConfig := &AzureADConfig{
|
||||
Cloud: "AzurePublic",
|
||||
ManagedIdentity: managedIdentityConfig,
|
||||
}
|
||||
s.mockCredential.On("GetToken", mock.Anything, mock.Anything).Return(getToken(), nil)
|
||||
|
||||
actualTokenProvider, actualErr := newTokenProvider(azureAdConfig, s.mockCredential)
|
||||
|
||||
s.Assert().NotNil(actualTokenProvider)
|
||||
s.Assert().Nil(actualErr)
|
||||
s.Assert().NotNil(actualTokenProvider.getAccessToken(context.Background()))
|
||||
}
|
||||
|
||||
func (s *TokenProviderTestSuite) TestPeriodicTokenRefresh_Success() {
|
||||
// setup
|
||||
managedIdentityConfig := &ManagedIdentityConfig{
|
||||
ClientID: dummyClientID,
|
||||
}
|
||||
|
||||
azureAdConfig := &AzureADConfig{
|
||||
Cloud: "AzurePublic",
|
||||
ManagedIdentity: managedIdentityConfig,
|
||||
}
|
||||
testToken := &azcore.AccessToken{
|
||||
Token: testTokenString,
|
||||
ExpiresOn: testTokenExpiry,
|
||||
}
|
||||
|
||||
s.mockCredential.On("GetToken", mock.Anything, mock.Anything).Return(*testToken, nil).Once().
|
||||
On("GetToken", mock.Anything, mock.Anything).Return(getToken(), nil)
|
||||
|
||||
actualTokenProvider, actualErr := newTokenProvider(azureAdConfig, s.mockCredential)
|
||||
|
||||
s.Assert().NotNil(actualTokenProvider)
|
||||
s.Assert().Nil(actualErr)
|
||||
s.Assert().NotNil(actualTokenProvider.getAccessToken(context.Background()))
|
||||
|
||||
// Token set to refresh at half of the expiry time. The test tokens are set to expire in 10s.
|
||||
// Hence, the six-second wait to check if the token is refreshed.
|
||||
time.Sleep(6 * time.Second)
|
||||
|
||||
s.Assert().NotNil(actualTokenProvider.getAccessToken(context.Background()))
|
||||
|
||||
s.mockCredential.AssertNumberOfCalls(s.T(), "GetToken", 2)
|
||||
accessToken, err := actualTokenProvider.getAccessToken(context.Background())
|
||||
s.Assert().Nil(err)
|
||||
s.Assert().NotEqual(accessToken, testTokenString)
|
||||
}
|
||||
|
||||
func getToken() azcore.AccessToken {
|
||||
return azcore.AccessToken{
|
||||
Token: uuid.New().String(),
|
||||
ExpiresOn: time.Now().Add(10 * time.Second),
|
||||
}
|
||||
}
|
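The sleep duration in TestPeriodicTokenRefresh_Success follows from updateRefreshTime's halving rule; a sketch of that arithmetic:

package main

import (
	"fmt"
	"time"
)

// refreshTime mirrors the rule under test: refresh once half of the
// token's remaining lifetime has elapsed. For the 10s test tokens the
// refresh point lands near 5s, which is why the test sleeps 6s before
// expecting a second GetToken call.
func refreshTime(now, expiresOn time.Time) time.Time {
	return now.Add(expiresOn.Sub(now) / 2)
}

func main() {
	now := time.Now()
	fmt.Println(refreshTime(now, now.Add(10*time.Second)).Sub(now)) // 5s
}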
1
storage/remote/azuread/testdata/azuread_bad_clientidmissing.yaml
vendored
Normal file
|
@ -0,0 +1 @@
|
|||
cloud: AzurePublic
|
3
storage/remote/azuread/testdata/azuread_bad_invalidclientid.yaml
vendored
Normal file
|
@ -0,0 +1,3 @@
|
|||
cloud: AzurePublic
|
||||
managed_identity:
|
||||
client_id: foo-foobar-bar-foo-00000000
|
3
storage/remote/azuread/testdata/azuread_good.yaml
vendored
Normal file
|
@ -0,0 +1,3 @@
|
|||
cloud: AzurePublic
|
||||
managed_identity:
|
||||
client_id: 00000000-0000-0000-0000-000000000000
|
2
storage/remote/azuread/testdata/azuread_good_cloudmissing.yaml
vendored
Normal file
|
@ -0,0 +1,2 @@
|
|||
managed_identity:
|
||||
client_id: 00000000-0000-0000-0000-000000000000
|
|
@ -36,6 +36,7 @@ import (
|
|||
"go.opentelemetry.io/otel/trace"
|
||||
|
||||
"github.com/prometheus/prometheus/prompb"
|
||||
"github.com/prometheus/prometheus/storage/remote/azuread"
|
||||
)
|
||||
|
||||
const maxErrMsgLen = 1024
|
||||
|
@ -97,6 +98,7 @@ type ClientConfig struct {
|
|||
Timeout model.Duration
|
||||
HTTPClientConfig config_util.HTTPClientConfig
|
||||
SigV4Config *sigv4.SigV4Config
|
||||
AzureADConfig *azuread.AzureADConfig
|
||||
Headers map[string]string
|
||||
RetryOnRateLimit bool
|
||||
}
|
||||
|
@ -146,6 +148,13 @@ func NewWriteClient(name string, conf *ClientConfig) (WriteClient, error) {
|
|||
}
|
||||
}
|
||||
|
||||
if conf.AzureADConfig != nil {
|
||||
t, err = azuread.NewAzureADRoundTripper(conf.AzureADConfig, httpClient.Transport)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
if len(conf.Headers) > 0 {
|
||||
t = newInjectHeadersRoundTripper(conf.Headers, t)
|
||||
}
|
||||
|
|
|
@ -158,6 +158,7 @@ func (rws *WriteStorage) ApplyConfig(conf *config.Config) error {
|
|||
Timeout: rwConf.RemoteTimeout,
|
||||
HTTPClientConfig: rwConf.HTTPClientConfig,
|
||||
SigV4Config: rwConf.SigV4Config,
|
||||
AzureADConfig: rwConf.AzureADConfig,
|
||||
Headers: rwConf.Headers,
|
||||
RetryOnRateLimit: rwConf.QueueConfig.RetryOnRateLimit,
|
||||
})
|
||||
|
|
|
@ -949,12 +949,22 @@ func (cdm *ChunkDiskMapper) Truncate(fileNo uint32) error {
|
|||
if len(chkFileIndices) == len(removedFiles) {
|
||||
// All files were deleted. Reset the current sequence.
|
||||
cdm.evtlPosMtx.Lock()
|
||||
if err == nil {
|
||||
cdm.evtlPos.setSeq(0)
|
||||
} else {
|
||||
// In case of error, set it to the last file number on the disk that was not deleted.
|
||||
cdm.evtlPos.setSeq(uint64(pendingDeletes[len(pendingDeletes)-1]))
|
||||
|
||||
// We can safely reset the sequence only if the write queue is empty. If it's not empty,
|
||||
// then there may be a job in the queue that will create a new segment file with an ID
|
||||
// generated before the sequence reset.
|
||||
//
|
||||
// The queueIsEmpty() function must be called while holding the cdm.evtlPosMtx to avoid
|
||||
// a race condition with WriteChunk().
|
||||
if cdm.writeQueue == nil || cdm.writeQueue.queueIsEmpty() {
|
||||
if err == nil {
|
||||
cdm.evtlPos.setSeq(0)
|
||||
} else {
|
||||
// In case of error, set it to the last file number on the disk that was not deleted.
|
||||
cdm.evtlPos.setSeq(uint64(pendingDeletes[len(pendingDeletes)-1]))
|
||||
}
|
||||
}
|
||||
|
||||
cdm.evtlPosMtx.Unlock()
|
||||
}
|
||||
|
||||
|
|
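A toy model of the race the guarded reset above closes, with seq standing in for the eventual-position bookkeeping (names are illustrative, not the ChunkDiskMapper API):

package main

import (
	"fmt"
	"sync"
)

// A writer reserves sequence numbers for jobs that complete asynchronously,
// while a truncation wants to reset the sequence to 0. Resetting while a job
// is still in flight would let that job create a segment with a stale, higher
// ID after the reset. The fix: only reset when the queue is observed empty,
// under the same mutex the writer holds when reserving a number.
type seq struct {
	mu      sync.Mutex
	next    uint64
	pending int
}

func (s *seq) reserve() uint64 {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.pending++
	s.next++
	return s.next
}

func (s *seq) complete() {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.pending--
}

func (s *seq) tryReset() bool {
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.pending != 0 { // analogous to !queueIsEmpty()
		return false
	}
	s.next = 0
	return true
}

func main() {
	s := &seq{}
	id := s.reserve()
	fmt.Println("reserved", id, "reset allowed:", s.tryReset()) // false: job in flight
	s.complete()
	fmt.Println("reset allowed:", s.tryReset()) // true
}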
|
@ -18,7 +18,9 @@ import (
|
|||
"math/rand"
|
||||
"os"
|
||||
"strconv"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
@ -394,6 +396,56 @@ func TestChunkDiskMapper_Truncate_PreservesFileSequence(t *testing.T) {
|
|||
verifyFiles([]int{5, 6, 7})
|
||||
}
|
||||
|
||||
func TestChunkDiskMapper_Truncate_WriteQueueRaceCondition(t *testing.T) {
|
||||
hrw := createChunkDiskMapper(t, "")
|
||||
t.Cleanup(func() {
|
||||
require.NoError(t, hrw.Close())
|
||||
})
|
||||
|
||||
// This test should only run when the queue is enabled.
|
||||
if hrw.writeQueue == nil {
|
||||
t.Skip("This test should only run when the queue is enabled")
|
||||
}
|
||||
|
||||
// Add an artificial delay in the writeChunk function to easily trigger the race condition.
|
||||
origWriteChunk := hrw.writeQueue.writeChunk
|
||||
hrw.writeQueue.writeChunk = func(seriesRef HeadSeriesRef, mint, maxt int64, chk chunkenc.Chunk, ref ChunkDiskMapperRef, isOOO, cutFile bool) error {
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
return origWriteChunk(seriesRef, mint, maxt, chk, ref, isOOO, cutFile)
|
||||
}
|
||||
|
||||
wg := sync.WaitGroup{}
|
||||
wg.Add(2)
|
||||
|
||||
// Write a chunk. Since the queue is enabled, the chunk will be written asynchronously (with the artificial delay).
|
||||
ref := hrw.WriteChunk(1, 0, 10, randomChunk(t), false, func(err error) {
|
||||
defer wg.Done()
|
||||
require.NoError(t, err)
|
||||
})
|
||||
|
||||
seq, _ := ref.Unpack()
|
||||
require.Equal(t, 1, seq)
|
||||
|
||||
// Truncate, simulating that all chunks from segment files before 1 can be dropped.
|
||||
require.NoError(t, hrw.Truncate(1))
|
||||
|
||||
// Request to cut a new file when writing the next chunk. If there's a race condition, cutting a new file will
|
||||
// allow us to detect there's actually an issue with the sequence number (because it's checked when a new segment
|
||||
// file is created).
|
||||
hrw.CutNewFile()
|
||||
|
||||
// Write another chunk. This will cut a new file.
|
||||
ref = hrw.WriteChunk(1, 0, 10, randomChunk(t), false, func(err error) {
|
||||
defer wg.Done()
|
||||
require.NoError(t, err)
|
||||
})
|
||||
|
||||
seq, _ = ref.Unpack()
|
||||
require.Equal(t, 2, seq)
|
||||
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
// TestHeadReadWriter_TruncateAfterIterateChunksError tests for
|
||||
// https://github.com/prometheus/prometheus/issues/7753
|
||||
func TestHeadReadWriter_TruncateAfterFailedIterateChunks(t *testing.T) {
|
||||
|
|
54
tsdb/db.go
|
@ -648,6 +648,60 @@ func (db *DBReadOnly) Blocks() ([]BlockReader, error) {
|
|||
return blockReaders, nil
|
||||
}
|
||||
|
||||
// LastBlockID returns the BlockID of latest block.
|
||||
func (db *DBReadOnly) LastBlockID() (string, error) {
|
||||
entries, err := os.ReadDir(db.dir)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
max := uint64(0)
|
||||
|
||||
lastBlockID := ""
|
||||
|
||||
for _, e := range entries {
|
||||
// Check if dir is a block dir or not.
|
||||
dirName := e.Name()
|
||||
ulidObj, err := ulid.ParseStrict(dirName)
|
||||
if err != nil {
|
||||
continue // Not a block dir.
|
||||
}
|
||||
timestamp := ulidObj.Time()
|
||||
if timestamp > max {
|
||||
max = timestamp
|
||||
lastBlockID = dirName
|
||||
}
|
||||
}
|
||||
|
||||
if lastBlockID == "" {
|
||||
return "", errors.New("no blocks found")
|
||||
}
|
||||
|
||||
return lastBlockID, nil
|
||||
}
|
||||
|
||||
// Block returns a block reader by given block id.
|
||||
func (db *DBReadOnly) Block(blockID string) (BlockReader, error) {
|
||||
select {
|
||||
case <-db.closed:
|
||||
return nil, ErrClosed
|
||||
default:
|
||||
}
|
||||
|
||||
_, err := os.Stat(filepath.Join(db.dir, blockID))
|
||||
if os.IsNotExist(err) {
|
||||
return nil, errors.Errorf("invalid block ID %s", blockID)
|
||||
}
|
||||
|
||||
block, err := OpenBlock(db.logger, filepath.Join(db.dir, blockID), nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
db.closers = append(db.closers, block)
|
||||
|
||||
return block, nil
|
||||
}
|
||||
|
||||
// Close all block readers.
|
||||
func (db *DBReadOnly) Close() error {
|
||||
select {
|
||||
|
|
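LastBlockID reduces to "newest ULID wins", since a block's ULID embeds its creation timestamp. A sketch of just that selection, using the same oklog/ulid package (the directory names are hypothetical inputs; non-ULID entries are skipped, as in the code above):

package main

import (
	"fmt"

	"github.com/oklog/ulid"
)

// latest returns the name with the newest ULID-embedded timestamp.
func latest(names []string) string {
	max, last := uint64(0), ""
	for _, n := range names {
		u, err := ulid.ParseStrict(n)
		if err != nil {
			continue // not a block dir
		}
		if ts := u.Time(); ts > max {
			max, last = ts, n
		}
	}
	return last
}

func main() {
	fmt.Println(latest([]string{"lost+found", "01GTDVZZF52NSWB5SXQF0P2PGF"}))
}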
|
@ -68,7 +68,7 @@ func TestMain(m *testing.M) {
|
|||
// Ignore "ristretto" and its dependency "glog".
|
||||
goleak.IgnoreTopFunction("github.com/dgraph-io/ristretto.(*defaultPolicy).processItems"),
|
||||
goleak.IgnoreTopFunction("github.com/dgraph-io/ristretto.(*Cache).processItems"),
|
||||
goleak.IgnoreTopFunction("github.com/golang/glog.(*loggingT).flushDaemon"),
|
||||
goleak.IgnoreTopFunction("github.com/golang/glog.(*fileSink).flushDaemon"),
|
||||
)
|
||||
}
|
||||
|
||||
|
@ -2391,6 +2391,7 @@ func TestDBReadOnly(t *testing.T) {
|
|||
dbDir string
|
||||
logger = log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr))
|
||||
expBlocks []*Block
|
||||
expBlock *Block
|
||||
expSeries map[string][]tsdbutil.Sample
|
||||
expChunks map[string][][]tsdbutil.Sample
|
||||
expDBHash []byte
|
||||
|
@ -2434,6 +2435,7 @@ func TestDBReadOnly(t *testing.T) {
|
|||
require.NoError(t, app.Commit())
|
||||
|
||||
expBlocks = dbWritable.Blocks()
|
||||
expBlock = expBlocks[0]
|
||||
expDbSize, err := fileutil.DirSize(dbWritable.Dir())
|
||||
require.NoError(t, err)
|
||||
require.Greater(t, expDbSize, dbSizeBeforeAppend, "db size didn't increase after an append")
|
||||
|
@ -2462,7 +2464,22 @@ func TestDBReadOnly(t *testing.T) {
|
|||
require.Equal(t, expBlock.Meta(), blocks[i].Meta(), "block meta mismatch")
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("block", func(t *testing.T) {
|
||||
blockID := expBlock.meta.ULID.String()
|
||||
block, err := dbReadOnly.Block(blockID)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, expBlock.Meta(), block.Meta(), "block meta mismatch")
|
||||
})
|
||||
t.Run("invalid block ID", func(t *testing.T) {
|
||||
blockID := "01GTDVZZF52NSWB5SXQF0P2PGF"
|
||||
_, err := dbReadOnly.Block(blockID)
|
||||
require.Error(t, err)
|
||||
})
|
||||
t.Run("last block ID", func(t *testing.T) {
|
||||
blockID, err := dbReadOnly.LastBlockID()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, expBlocks[2].Meta().ULID.String(), blockID)
|
||||
})
|
||||
t.Run("querier", func(t *testing.T) {
|
||||
// Open a read only db and ensure that the API returns the same result as the normal DB.
|
||||
q, err := dbReadOnly.Querier(context.TODO(), math.MinInt64, math.MaxInt64)
|
||||
|
|
|
@ -216,9 +216,9 @@ func (ce *CircularExemplarStorage) ValidateExemplar(l labels.Labels, e exemplar.
|
|||
return ce.validateExemplar(seriesLabels, e, false)
|
||||
}
|
||||
|
||||
// Not thread safe. The append parameters tells us whether this is an external validation, or internal
|
||||
// Not thread safe. The appended parameter tells us whether this is an external validation, or internal
|
||||
// as a result of an AddExemplar call, in which case we should update any relevant metrics.
|
||||
func (ce *CircularExemplarStorage) validateExemplar(key []byte, e exemplar.Exemplar, append bool) error {
|
||||
func (ce *CircularExemplarStorage) validateExemplar(key []byte, e exemplar.Exemplar, appended bool) error {
|
||||
if len(ce.exemplars) == 0 {
|
||||
return storage.ErrExemplarsDisabled
|
||||
}
|
||||
|
@ -250,7 +250,7 @@ func (ce *CircularExemplarStorage) validateExemplar(key []byte, e exemplar.Exemp
|
|||
}
|
||||
|
||||
if e.Ts <= ce.exemplars[idx.newest].exemplar.Ts {
|
||||
if append {
|
||||
if appended {
|
||||
ce.metrics.outOfOrderExemplars.Inc()
|
||||
}
|
||||
return storage.ErrOutOfOrderExemplar
|
||||
|
|
|
@ -1641,7 +1641,7 @@ func (h *Head) getOrCreate(hash uint64, lset labels.Labels) (*memSeries, bool, e
|
|||
|
||||
func (h *Head) getOrCreateWithID(id chunks.HeadSeriesRef, hash uint64, lset labels.Labels) (*memSeries, bool, error) {
|
||||
s, created, err := h.series.getOrSet(hash, lset, func() *memSeries {
|
||||
return newMemSeries(lset, id, labels.StableHash(lset), h.opts.ChunkEndTimeVariance, h.opts.IsolationDisabled, h.opts.SamplesPerChunk)
|
||||
return newMemSeries(lset, id, labels.StableHash(lset), h.opts.ChunkEndTimeVariance, h.opts.IsolationDisabled)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
|
@ -1956,8 +1956,7 @@ type memSeries struct {
|
|||
// to spread chunks writing across time. Doesn't apply to the last chunk of the chunk range. 0 to disable variance.
|
||||
chunkEndTimeVariance float64
|
||||
|
||||
samplesPerChunk int // Target number of samples per chunk.
|
||||
nextAt int64 // Timestamp at which to cut the next chunk.
|
||||
nextAt int64 // Timestamp at which to cut the next chunk.
|
||||
|
||||
// We keep the last value here (in addition to appending it to the chunk) so we can check for duplicates.
|
||||
lastValue float64
|
||||
|
@ -1985,14 +1984,13 @@ type memSeriesOOOFields struct {
|
|||
firstOOOChunkID chunks.HeadChunkID // HeadOOOChunkID for oooMmappedChunks[0].
|
||||
}
|
||||
|
||||
func newMemSeries(lset labels.Labels, id chunks.HeadSeriesRef, shardHash uint64, chunkEndTimeVariance float64, isolationDisabled bool, samplesPerChunk int) *memSeries {
|
||||
func newMemSeries(lset labels.Labels, id chunks.HeadSeriesRef, shardHash uint64, chunkEndTimeVariance float64, isolationDisabled bool) *memSeries {
|
||||
s := &memSeries{
|
||||
lset: lset,
|
||||
ref: id,
|
||||
nextAt: math.MinInt64,
|
||||
chunkEndTimeVariance: chunkEndTimeVariance,
|
||||
shardHash: shardHash,
|
||||
samplesPerChunk: samplesPerChunk,
|
||||
}
|
||||
if !isolationDisabled {
|
||||
s.txs = newTxRing(4)
|
||||
|
|
|
@ -888,9 +888,13 @@ func (a *headAppender) Commit() (err error) {
|
|||
oooMmapMarkers map[chunks.HeadSeriesRef]chunks.ChunkDiskMapperRef
|
||||
oooRecords [][]byte
|
||||
oooCapMax = a.head.opts.OutOfOrderCapMax.Load()
|
||||
chunkRange = a.head.chunkRange.Load()
|
||||
series *memSeries
|
||||
enc record.Encoder
|
||||
appendChunkOpts = chunkOpts{
|
||||
chunkDiskMapper: a.head.chunkDiskMapper,
|
||||
chunkRange: a.head.chunkRange.Load(),
|
||||
samplesPerChunk: a.head.opts.SamplesPerChunk,
|
||||
}
|
||||
enc record.Encoder
|
||||
)
|
||||
defer func() {
|
||||
for i := range oooRecords {
|
||||
|
@ -994,7 +998,7 @@ func (a *headAppender) Commit() (err error) {
|
|||
samplesAppended--
|
||||
}
|
||||
default:
|
||||
ok, chunkCreated = series.append(s.T, s.V, a.appendID, a.head.chunkDiskMapper, chunkRange)
|
||||
ok, chunkCreated = series.append(s.T, s.V, a.appendID, appendChunkOpts)
|
||||
if ok {
|
||||
if s.T < inOrderMint {
|
||||
inOrderMint = s.T
|
||||
|
@ -1023,7 +1027,7 @@ func (a *headAppender) Commit() (err error) {
|
|||
for i, s := range a.histograms {
|
||||
series = a.histogramSeries[i]
|
||||
series.Lock()
|
||||
ok, chunkCreated := series.appendHistogram(s.T, s.H, a.appendID, a.head.chunkDiskMapper, chunkRange)
|
||||
ok, chunkCreated := series.appendHistogram(s.T, s.H, a.appendID, appendChunkOpts)
|
||||
series.cleanupAppendIDsBelow(a.cleanupAppendIDsBelow)
|
||||
series.pendingCommit = false
|
||||
series.Unlock()
|
||||
|
@ -1049,7 +1053,7 @@ func (a *headAppender) Commit() (err error) {
|
|||
for i, s := range a.floatHistograms {
|
||||
series = a.floatHistogramSeries[i]
|
||||
series.Lock()
|
||||
ok, chunkCreated := series.appendFloatHistogram(s.T, s.FH, a.appendID, a.head.chunkDiskMapper, chunkRange)
|
||||
ok, chunkCreated := series.appendFloatHistogram(s.T, s.FH, a.appendID, appendChunkOpts)
|
||||
series.cleanupAppendIDsBelow(a.cleanupAppendIDsBelow)
|
||||
series.pendingCommit = false
|
||||
series.Unlock()
|
||||
|
@ -1125,12 +1129,19 @@ func (s *memSeries) insert(t int64, v float64, chunkDiskMapper chunkDiskMapper,
|
|||
return ok, chunkCreated, mmapRef
|
||||
}
|
||||
|
||||
// chunkOpts are chunk-level options that are passed when appending to a memSeries.
|
||||
type chunkOpts struct {
|
||||
chunkDiskMapper chunkDiskMapper
|
||||
chunkRange int64
|
||||
samplesPerChunk int
|
||||
}
|
||||
|
||||
// append adds the sample (t, v) to the series. The caller also has to provide
|
||||
// the appendID for isolation. (The appendID can be zero, which results in no
|
||||
// isolation for this append.)
|
||||
// It is unsafe to call this concurrently with s.iterator(...) without holding the series lock.
|
||||
func (s *memSeries) append(t int64, v float64, appendID uint64, chunkDiskMapper chunkDiskMapper, chunkRange int64) (sampleInOrder, chunkCreated bool) {
|
||||
c, sampleInOrder, chunkCreated := s.appendPreprocessor(t, chunkenc.EncXOR, chunkDiskMapper, chunkRange)
|
||||
func (s *memSeries) append(t int64, v float64, appendID uint64, o chunkOpts) (sampleInOrder, chunkCreated bool) {
|
||||
c, sampleInOrder, chunkCreated := s.appendPreprocessor(t, chunkenc.EncXOR, o)
|
||||
if !sampleInOrder {
|
||||
return sampleInOrder, chunkCreated
|
||||
}
|
||||
|
@ -1151,7 +1162,7 @@ func (s *memSeries) append(t int64, v float64, appendID uint64, chunkDiskMapper
|
|||
|
||||
// appendHistogram adds the histogram.
|
||||
// It is unsafe to call this concurrently with s.iterator(...) without holding the series lock.
|
||||
func (s *memSeries) appendHistogram(t int64, h *histogram.Histogram, appendID uint64, chunkDiskMapper chunkDiskMapper, chunkRange int64) (sampleInOrder, chunkCreated bool) {
|
||||
func (s *memSeries) appendHistogram(t int64, h *histogram.Histogram, appendID uint64, o chunkOpts) (sampleInOrder, chunkCreated bool) {
|
||||
// Head controls the execution of recoding, so that we own the proper
|
||||
// chunk reference afterwards. We check for Appendable from appender before
|
||||
// appendPreprocessor because in case it ends up creating a new chunk,
|
||||
|
@ -1164,7 +1175,7 @@ func (s *memSeries) appendHistogram(t int64, h *histogram.Histogram, appendID ui
|
|||
pMergedSpans, nMergedSpans []histogram.Span
|
||||
okToAppend, counterReset, gauge bool
|
||||
)
|
||||
c, sampleInOrder, chunkCreated := s.appendPreprocessor(t, chunkenc.EncHistogram, chunkDiskMapper, chunkRange)
|
||||
c, sampleInOrder, chunkCreated := s.appendPreprocessor(t, chunkenc.EncHistogram, o)
|
||||
if !sampleInOrder {
|
||||
return sampleInOrder, chunkCreated
|
||||
}
|
||||
|
@ -1200,7 +1211,7 @@ func (s *memSeries) appendHistogram(t int64, h *histogram.Histogram, appendID ui
|
|||
// - okToAppend and no inserts → Chunk is ready to support our histogram.
|
||||
switch {
|
||||
case !okToAppend || counterReset:
|
||||
c = s.cutNewHeadChunk(t, chunkenc.EncHistogram, chunkDiskMapper, chunkRange)
|
||||
c = s.cutNewHeadChunk(t, chunkenc.EncHistogram, o.chunkDiskMapper, o.chunkRange)
|
||||
chunkCreated = true
|
||||
case len(pForwardInserts) > 0 || len(nForwardInserts) > 0:
|
||||
// New buckets have appeared. We need to recode all
|
||||
|
@ -1245,7 +1256,7 @@ func (s *memSeries) appendHistogram(t int64, h *histogram.Histogram, appendID ui
|
|||
|
||||
// appendFloatHistogram adds the float histogram.
|
||||
// It is unsafe to call this concurrently with s.iterator(...) without holding the series lock.
|
||||
func (s *memSeries) appendFloatHistogram(t int64, fh *histogram.FloatHistogram, appendID uint64, chunkDiskMapper chunkDiskMapper, chunkRange int64) (sampleInOrder, chunkCreated bool) {
|
||||
func (s *memSeries) appendFloatHistogram(t int64, fh *histogram.FloatHistogram, appendID uint64, o chunkOpts) (sampleInOrder, chunkCreated bool) {
|
||||
// Head controls the execution of recoding, so that we own the proper
|
||||
// chunk reference afterwards. We check for Appendable from appender before
|
||||
// appendPreprocessor because in case it ends up creating a new chunk,
|
||||
|
@ -1258,7 +1269,7 @@ func (s *memSeries) appendFloatHistogram(t int64, fh *histogram.FloatHistogram,
|
|||
pMergedSpans, nMergedSpans []histogram.Span
|
||||
okToAppend, counterReset, gauge bool
|
||||
)
|
||||
c, sampleInOrder, chunkCreated := s.appendPreprocessor(t, chunkenc.EncFloatHistogram, chunkDiskMapper, chunkRange)
|
||||
c, sampleInOrder, chunkCreated := s.appendPreprocessor(t, chunkenc.EncFloatHistogram, o)
|
||||
if !sampleInOrder {
|
||||
return sampleInOrder, chunkCreated
|
||||
}
|
||||
|
@ -1294,7 +1305,7 @@ func (s *memSeries) appendFloatHistogram(t int64, fh *histogram.FloatHistogram,
|
|||
// - okToAppend and no inserts → Chunk is ready to support our histogram.
|
||||
switch {
|
||||
case !okToAppend || counterReset:
|
||||
c = s.cutNewHeadChunk(t, chunkenc.EncFloatHistogram, chunkDiskMapper, chunkRange)
|
||||
c = s.cutNewHeadChunk(t, chunkenc.EncFloatHistogram, o.chunkDiskMapper, o.chunkRange)
|
||||
chunkCreated = true
|
||||
case len(pForwardInserts) > 0 || len(nForwardInserts) > 0:
|
||||
// New buckets have appeared. We need to recode all
|
||||
|
@ -1340,9 +1351,7 @@ func (s *memSeries) appendFloatHistogram(t int64, fh *histogram.FloatHistogram,
|
|||
// appendPreprocessor takes care of cutting new chunks and m-mapping old chunks.
|
||||
// It is unsafe to call this concurrently with s.iterator(...) without holding the series lock.
|
||||
// This should be called only when appending data.
|
||||
func (s *memSeries) appendPreprocessor(
|
||||
t int64, e chunkenc.Encoding, chunkDiskMapper chunkDiskMapper, chunkRange int64,
|
||||
) (c *memChunk, sampleInOrder, chunkCreated bool) {
|
||||
func (s *memSeries) appendPreprocessor(t int64, e chunkenc.Encoding, o chunkOpts) (c *memChunk, sampleInOrder, chunkCreated bool) {
|
||||
c = s.head()
|
||||
|
||||
if c == nil {
|
||||
|
@ -1351,7 +1360,7 @@ func (s *memSeries) appendPreprocessor(
|
|||
return c, false, false
|
||||
}
|
||||
// There is no head chunk in this series yet, create the first chunk for the sample.
|
||||
c = s.cutNewHeadChunk(t, e, chunkDiskMapper, chunkRange)
|
||||
c = s.cutNewHeadChunk(t, e, o.chunkDiskMapper, o.chunkRange)
|
||||
chunkCreated = true
|
||||
}
|
||||
|
||||
|
@ -1363,7 +1372,7 @@ func (s *memSeries) appendPreprocessor(
|
|||
if c.chunk.Encoding() != e {
|
||||
// The chunk encoding expected by this append is different than the head chunk's
|
||||
// encoding. So we cut a new chunk with the expected encoding.
|
||||
c = s.cutNewHeadChunk(t, e, chunkDiskMapper, chunkRange)
|
||||
c = s.cutNewHeadChunk(t, e, o.chunkDiskMapper, o.chunkRange)
|
||||
chunkCreated = true
|
||||
}
|
||||
|
||||
|
@ -1372,14 +1381,14 @@ func (s *memSeries) appendPreprocessor(
|
|||
// It could be the new chunk created after reading the chunk snapshot,
|
||||
// hence we fix the minTime of the chunk here.
|
||||
c.minTime = t
|
||||
s.nextAt = rangeForTimestamp(c.minTime, chunkRange)
|
||||
s.nextAt = rangeForTimestamp(c.minTime, o.chunkRange)
|
||||
}
|
||||
|
||||
// If we reach 25% of a chunk's desired sample count, predict an end time
|
||||
// for this chunk that will try to make samples equally distributed within
|
||||
// the remaining chunks in the current chunk range.
|
||||
// At latest it must happen at the timestamp set when the chunk was cut.
|
||||
if numSamples == s.samplesPerChunk/4 {
|
||||
if numSamples == o.samplesPerChunk/4 {
|
||||
maxNextAt := s.nextAt
|
||||
|
||||
s.nextAt = computeChunkEndTime(c.minTime, c.maxTime, maxNextAt)
|
||||
|
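An illustrative reading of the 25% checkpoint above: extrapolate the observed fill rate across the remaining chunk range and cut so the leftover chunks come out roughly equally full. This sketches the heuristic, not the actual computeChunkEndTime implementation:

package main

import "fmt"

// predictEnd: once a chunk holds a quarter of its target samples after
// `elapsed` time, the full range would hold about remaining/(elapsed*4)
// chunks at that rate; cut this chunk at an equal share of the range.
func predictEnd(minTime, curTime, rangeEnd int64) int64 {
	elapsed := curTime - minTime + 1
	remaining := rangeEnd - minTime
	n := remaining / (elapsed * 4)
	if n <= 1 {
		return rangeEnd
	}
	return minTime + remaining/n
}

func main() {
	// 2h range; 25% of the target samples arrived in the first 10 minutes.
	fmt.Println(predictEnd(0, 600_000, 7_200_000)) // cuts well before the 2h mark
}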
@ -1390,8 +1399,8 @@ func (s *memSeries) appendPreprocessor(
|
|||
// Since we assume that the rate is higher, we're being conservative and cutting at 2*samplesPerChunk
|
||||
// as we expect more chunks to come.
|
||||
// Note that next chunk will have its nextAt recalculated for the new rate.
|
||||
if t >= s.nextAt || numSamples >= s.samplesPerChunk*2 {
|
||||
c = s.cutNewHeadChunk(t, e, chunkDiskMapper, chunkRange)
|
||||
if t >= s.nextAt || numSamples >= o.samplesPerChunk*2 {
|
||||
c = s.cutNewHeadChunk(t, e, o.chunkDiskMapper, o.chunkRange)
|
||||
chunkCreated = true
|
||||
}
|
||||
|
||||
|
|
|
@ -283,11 +283,16 @@ func BenchmarkLoadWAL(b *testing.B) {
|
|||
if c.mmappedChunkT != 0 {
|
||||
chunkDiskMapper, err := chunks.NewChunkDiskMapper(nil, mmappedChunksDir(dir), chunkenc.NewPool(), chunks.DefaultWriteBufferSize, chunks.DefaultWriteQueueSize)
|
||||
require.NoError(b, err)
|
||||
cOpts := chunkOpts{
|
||||
chunkDiskMapper: chunkDiskMapper,
|
||||
chunkRange: c.mmappedChunkT,
|
||||
samplesPerChunk: DefaultSamplesPerChunk,
|
||||
}
|
||||
for k := 0; k < c.batches*c.seriesPerBatch; k++ {
|
||||
// Create one mmapped chunk per series, with one sample at the given time.
|
||||
lbls := labels.Labels{}
|
||||
s := newMemSeries(lbls, chunks.HeadSeriesRef(k)*101, labels.StableHash(lbls), 0, defaultIsolationDisabled, DefaultSamplesPerChunk)
|
||||
s.append(c.mmappedChunkT, 42, 0, chunkDiskMapper, c.mmappedChunkT)
|
||||
s := newMemSeries(lbls, chunks.HeadSeriesRef(k)*101, labels.StableHash(lbls), 0, defaultIsolationDisabled)
|
||||
s.append(c.mmappedChunkT, 42, 0, cOpts)
|
||||
s.mmapCurrentHeadChunk(chunkDiskMapper)
|
||||
}
|
||||
require.NoError(b, chunkDiskMapper.Close())
|
||||
|
@ -800,7 +805,11 @@ func TestMemSeries_truncateChunks(t *testing.T) {
|
|||
defer func() {
|
||||
require.NoError(t, chunkDiskMapper.Close())
|
||||
}()
|
||||
const chunkRange = 2000
|
||||
cOpts := chunkOpts{
|
||||
chunkDiskMapper: chunkDiskMapper,
|
||||
chunkRange: 2000,
|
||||
samplesPerChunk: DefaultSamplesPerChunk,
|
||||
}
|
||||
|
||||
memChunkPool := sync.Pool{
|
||||
New: func() interface{} {
|
||||
|
@ -809,10 +818,10 @@ func TestMemSeries_truncateChunks(t *testing.T) {
|
|||
}
|
||||
|
||||
lbls := labels.FromStrings("a", "b")
|
||||
s := newMemSeries(lbls, 1, labels.StableHash(lbls), 0, defaultIsolationDisabled, DefaultSamplesPerChunk)
|
||||
s := newMemSeries(lbls, 1, labels.StableHash(lbls), 0, defaultIsolationDisabled)
|
||||
|
||||
for i := 0; i < 4000; i += 5 {
|
||||
ok, _ := s.append(int64(i), float64(i), 0, chunkDiskMapper, chunkRange)
|
||||
ok, _ := s.append(int64(i), float64(i), 0, cOpts)
|
||||
require.True(t, ok, "sample append failed")
|
||||
}
|
||||
|
||||
|
@ -1338,27 +1347,31 @@ func TestMemSeries_append(t *testing.T) {
|
|||
defer func() {
|
||||
require.NoError(t, chunkDiskMapper.Close())
|
||||
}()
|
||||
const chunkRange = 500
|
||||
cOpts := chunkOpts{
|
||||
chunkDiskMapper: chunkDiskMapper,
|
||||
chunkRange: 500,
|
||||
samplesPerChunk: DefaultSamplesPerChunk,
|
||||
}
|
||||
|
||||
lbls := labels.Labels{}
|
||||
s := newMemSeries(lbls, 1, labels.StableHash(lbls), 0, defaultIsolationDisabled, DefaultSamplesPerChunk)
|
||||
s := newMemSeries(lbls, 1, labels.StableHash(lbls), 0, defaultIsolationDisabled)
|
||||
|
||||
// Add first two samples at the very end of a chunk range and the next two
|
||||
// on and after it.
|
||||
// New chunk must correctly be cut at 1000.
|
||||
ok, chunkCreated := s.append(998, 1, 0, chunkDiskMapper, chunkRange)
|
||||
ok, chunkCreated := s.append(998, 1, 0, cOpts)
|
||||
require.True(t, ok, "append failed")
|
||||
require.True(t, chunkCreated, "first sample created chunk")
|
||||
|
||||
ok, chunkCreated = s.append(999, 2, 0, chunkDiskMapper, chunkRange)
|
||||
ok, chunkCreated = s.append(999, 2, 0, cOpts)
|
||||
require.True(t, ok, "append failed")
|
||||
require.False(t, chunkCreated, "second sample should use same chunk")
|
||||
|
||||
ok, chunkCreated = s.append(1000, 3, 0, chunkDiskMapper, chunkRange)
|
||||
ok, chunkCreated = s.append(1000, 3, 0, cOpts)
|
||||
require.True(t, ok, "append failed")
|
||||
require.True(t, chunkCreated, "expected new chunk on boundary")
|
||||
|
||||
ok, chunkCreated = s.append(1001, 4, 0, chunkDiskMapper, chunkRange)
|
||||
ok, chunkCreated = s.append(1001, 4, 0, cOpts)
|
||||
require.True(t, ok, "append failed")
|
||||
require.False(t, chunkCreated, "second sample should use same chunk")
|
||||
|
||||
|
@ -1371,7 +1384,7 @@ func TestMemSeries_append(t *testing.T) {
|
|||
// Fill the range [1000,2000) with many samples. Intermediate chunks should be cut
|
||||
// at approximately 120 samples per chunk.
|
||||
for i := 1; i < 1000; i++ {
|
||||
ok, _ := s.append(1001+int64(i), float64(i), 0, chunkDiskMapper, chunkRange)
|
||||
ok, _ := s.append(1001+int64(i), float64(i), 0, cOpts)
|
||||
require.True(t, ok, "append failed")
|
||||
}
|
||||
|
||||
|
@ -1393,10 +1406,14 @@ func TestMemSeries_appendHistogram(t *testing.T) {
|
|||
defer func() {
|
||||
require.NoError(t, chunkDiskMapper.Close())
|
||||
}()
|
||||
chunkRange := int64(1000)
|
||||
cOpts := chunkOpts{
|
||||
+		chunkDiskMapper: chunkDiskMapper,
+		chunkRange:      int64(1000),
+		samplesPerChunk: DefaultSamplesPerChunk,
+	}

	lbls := labels.Labels{}
-	s := newMemSeries(lbls, 1, labels.StableHash(lbls), 0, defaultIsolationDisabled, DefaultSamplesPerChunk)
+	s := newMemSeries(lbls, 1, labels.StableHash(lbls), 0, defaultIsolationDisabled)

	histograms := tsdbutil.GenerateTestHistograms(4)
	histogramWithOneMoreBucket := histograms[3].Copy()

@@ -1408,19 +1425,19 @@ func TestMemSeries_appendHistogram(t *testing.T) {
	// Add first two samples at the very end of a chunk range and the next two
	// on and after it.
	// New chunk must correctly be cut at 1000.
-	ok, chunkCreated := s.appendHistogram(998, histograms[0], 0, chunkDiskMapper, chunkRange)
+	ok, chunkCreated := s.appendHistogram(998, histograms[0], 0, cOpts)
	require.True(t, ok, "append failed")
	require.True(t, chunkCreated, "first sample created chunk")

-	ok, chunkCreated = s.appendHistogram(999, histograms[1], 0, chunkDiskMapper, chunkRange)
+	ok, chunkCreated = s.appendHistogram(999, histograms[1], 0, cOpts)
	require.True(t, ok, "append failed")
	require.False(t, chunkCreated, "second sample should use same chunk")

-	ok, chunkCreated = s.appendHistogram(1000, histograms[2], 0, chunkDiskMapper, chunkRange)
+	ok, chunkCreated = s.appendHistogram(1000, histograms[2], 0, cOpts)
	require.True(t, ok, "append failed")
	require.True(t, chunkCreated, "expected new chunk on boundary")

-	ok, chunkCreated = s.appendHistogram(1001, histograms[3], 0, chunkDiskMapper, chunkRange)
+	ok, chunkCreated = s.appendHistogram(1001, histograms[3], 0, cOpts)
	require.True(t, ok, "append failed")
	require.False(t, chunkCreated, "second sample should use same chunk")

@@ -1430,7 +1447,7 @@ func TestMemSeries_appendHistogram(t *testing.T) {
	require.Equal(t, int64(1000), s.headChunk.minTime, "wrong chunk range")
	require.Equal(t, int64(1001), s.headChunk.maxTime, "wrong chunk range")

-	ok, chunkCreated = s.appendHistogram(1002, histogramWithOneMoreBucket, 0, chunkDiskMapper, chunkRange)
+	ok, chunkCreated = s.appendHistogram(1002, histogramWithOneMoreBucket, 0, cOpts)
	require.True(t, ok, "append failed")
	require.False(t, chunkCreated, "third sample should trigger a re-encoded chunk")

@@ -1450,10 +1467,14 @@ func TestMemSeries_append_atVariableRate(t *testing.T) {
	t.Cleanup(func() {
		require.NoError(t, chunkDiskMapper.Close())
	})
-	chunkRange := DefaultBlockDuration
+	cOpts := chunkOpts{
+		chunkDiskMapper: chunkDiskMapper,
+		chunkRange:      DefaultBlockDuration,
+		samplesPerChunk: samplesPerChunk,
+	}

	lbls := labels.Labels{}
-	s := newMemSeries(lbls, 1, labels.StableHash(lbls), 0, defaultIsolationDisabled, DefaultSamplesPerChunk)
+	s := newMemSeries(lbls, 1, labels.StableHash(lbls), 0, defaultIsolationDisabled)

	// At this slow rate, we will fill the chunk in two block durations.
	slowRate := (DefaultBlockDuration * 2) / samplesPerChunk

@@ -1461,7 +1482,7 @@ func TestMemSeries_append_atVariableRate(t *testing.T) {
	var nextTs int64
	var totalAppendedSamples int
	for i := 0; i < samplesPerChunk/4; i++ {
-		ok, _ := s.append(nextTs, float64(i), 0, chunkDiskMapper, chunkRange)
+		ok, _ := s.append(nextTs, float64(i), 0, cOpts)
		require.Truef(t, ok, "slow sample %d was not appended", i)
		nextTs += slowRate
		totalAppendedSamples++

@@ -1470,12 +1491,12 @@ func TestMemSeries_append_atVariableRate(t *testing.T) {

	// Suddenly, the rate increases and we receive a sample every millisecond.
	for i := 0; i < math.MaxUint16; i++ {
-		ok, _ := s.append(nextTs, float64(i), 0, chunkDiskMapper, chunkRange)
+		ok, _ := s.append(nextTs, float64(i), 0, cOpts)
		require.Truef(t, ok, "quick sample %d was not appended", i)
		nextTs++
		totalAppendedSamples++
	}
-	ok, chunkCreated := s.append(DefaultBlockDuration, float64(0), 0, chunkDiskMapper, chunkRange)
+	ok, chunkCreated := s.append(DefaultBlockDuration, float64(0), 0, cOpts)
	require.True(t, ok, "new chunk sample was not appended")
	require.True(t, chunkCreated, "sample at block duration timestamp should create a new chunk")
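A note on the arithmetic in TestMemSeries_append_atVariableRate: the slow phase spaces samples so that a full chunk would need two whole block durations to fill, which forces the head to cut the chunk on its time range rather than on its sample count. A minimal sketch of the numbers, assuming the usual Prometheus defaults of a 2h block duration (in milliseconds) and 120 samples per chunk; the constant values here are assumptions, the formula is the one from the test:

package main

import "fmt"

func main() {
	const defaultBlockDuration = 2 * 60 * 60 * 1000 // assumed: 2h expressed in ms
	const samplesPerChunk = 120                     // assumed: the default chunk fill target

	// Same expression as in the test: at this rate a chunk would span
	// two block durations before reaching samplesPerChunk samples.
	slowRate := (defaultBlockDuration * 2) / samplesPerChunk
	fmt.Println(slowRate) // 120000 ms, i.e. one sample every two minutes
}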
@@ -1495,23 +1516,29 @@ func TestGCChunkAccess(t *testing.T) {
		require.NoError(t, h.Close())
	}()

+	cOpts := chunkOpts{
+		chunkDiskMapper: h.chunkDiskMapper,
+		chunkRange:      chunkRange,
+		samplesPerChunk: DefaultSamplesPerChunk,
+	}

	h.initTime(0)

	s, _, _ := h.getOrCreate(1, labels.FromStrings("a", "1"))

	// Appending 2 samples for the first chunk.
-	ok, chunkCreated := s.append(0, 0, 0, h.chunkDiskMapper, chunkRange)
+	ok, chunkCreated := s.append(0, 0, 0, cOpts)
	require.True(t, ok, "series append failed")
	require.True(t, chunkCreated, "chunks was not created")
-	ok, chunkCreated = s.append(999, 999, 0, h.chunkDiskMapper, chunkRange)
+	ok, chunkCreated = s.append(999, 999, 0, cOpts)
	require.True(t, ok, "series append failed")
	require.False(t, chunkCreated, "chunks was created")

	// A new chunks should be created here as it's beyond the chunk range.
-	ok, chunkCreated = s.append(1000, 1000, 0, h.chunkDiskMapper, chunkRange)
+	ok, chunkCreated = s.append(1000, 1000, 0, cOpts)
	require.True(t, ok, "series append failed")
	require.True(t, chunkCreated, "chunks was not created")
-	ok, chunkCreated = s.append(1999, 1999, 0, h.chunkDiskMapper, chunkRange)
+	ok, chunkCreated = s.append(1999, 1999, 0, cOpts)
	require.True(t, ok, "series append failed")
	require.False(t, chunkCreated, "chunks was created")

@@ -1548,23 +1575,29 @@ func TestGCSeriesAccess(t *testing.T) {
		require.NoError(t, h.Close())
	}()

+	cOpts := chunkOpts{
+		chunkDiskMapper: h.chunkDiskMapper,
+		chunkRange:      chunkRange,
+		samplesPerChunk: DefaultSamplesPerChunk,
+	}

	h.initTime(0)

	s, _, _ := h.getOrCreate(1, labels.FromStrings("a", "1"))

	// Appending 2 samples for the first chunk.
-	ok, chunkCreated := s.append(0, 0, 0, h.chunkDiskMapper, chunkRange)
+	ok, chunkCreated := s.append(0, 0, 0, cOpts)
	require.True(t, ok, "series append failed")
	require.True(t, chunkCreated, "chunks was not created")
-	ok, chunkCreated = s.append(999, 999, 0, h.chunkDiskMapper, chunkRange)
+	ok, chunkCreated = s.append(999, 999, 0, cOpts)
	require.True(t, ok, "series append failed")
	require.False(t, chunkCreated, "chunks was created")

	// A new chunks should be created here as it's beyond the chunk range.
-	ok, chunkCreated = s.append(1000, 1000, 0, h.chunkDiskMapper, chunkRange)
+	ok, chunkCreated = s.append(1000, 1000, 0, cOpts)
	require.True(t, ok, "series append failed")
	require.True(t, chunkCreated, "chunks was not created")
-	ok, chunkCreated = s.append(1999, 1999, 0, h.chunkDiskMapper, chunkRange)
+	ok, chunkCreated = s.append(1999, 1999, 0, cOpts)
	require.True(t, ok, "series append failed")
	require.False(t, chunkCreated, "chunks was created")

@@ -1796,14 +1829,20 @@ func TestHeadReadWriterRepair(t *testing.T) {
	require.Equal(t, 0.0, prom_testutil.ToFloat64(h.metrics.mmapChunkCorruptionTotal))
	require.NoError(t, h.Init(math.MinInt64))

+	cOpts := chunkOpts{
+		chunkDiskMapper: h.chunkDiskMapper,
+		chunkRange:      chunkRange,
+		samplesPerChunk: DefaultSamplesPerChunk,
+	}

	s, created, _ := h.getOrCreate(1, labels.FromStrings("a", "1"))
	require.True(t, created, "series was not created")

	for i := 0; i < 7; i++ {
-		ok, chunkCreated := s.append(int64(i*chunkRange), float64(i*chunkRange), 0, h.chunkDiskMapper, chunkRange)
+		ok, chunkCreated := s.append(int64(i*chunkRange), float64(i*chunkRange), 0, cOpts)
		require.True(t, ok, "series append failed")
		require.True(t, chunkCreated, "chunk was not created")
-		ok, chunkCreated = s.append(int64(i*chunkRange)+chunkRange-1, float64(i*chunkRange), 0, h.chunkDiskMapper, chunkRange)
+		ok, chunkCreated = s.append(int64(i*chunkRange)+chunkRange-1, float64(i*chunkRange), 0, cOpts)
		require.True(t, ok, "series append failed")
		require.False(t, chunkCreated, "chunk was created")
		require.NoError(t, h.chunkDiskMapper.CutNewFile())

@@ -2149,9 +2188,15 @@ func TestIsolationAppendIDZeroIsNoop(t *testing.T) {

	h.initTime(0)

+	cOpts := chunkOpts{
+		chunkDiskMapper: h.chunkDiskMapper,
+		chunkRange:      h.chunkRange.Load(),
+		samplesPerChunk: DefaultSamplesPerChunk,
+	}

	s, _, _ := h.getOrCreate(1, labels.FromStrings("a", "1"))

-	ok, _ := s.append(0, 0, 0, h.chunkDiskMapper, h.chunkRange.Load())
+	ok, _ := s.append(0, 0, 0, cOpts)
	require.True(t, ok, "Series append failed.")
	require.Equal(t, 0, s.txs.txIDCount, "Series should not have an appendID after append with appendID=0.")
}

@@ -2674,13 +2719,17 @@ func TestIteratorSeekIntoBuffer(t *testing.T) {
	defer func() {
		require.NoError(t, chunkDiskMapper.Close())
	}()
-	const chunkRange = 500
+	cOpts := chunkOpts{
+		chunkDiskMapper: chunkDiskMapper,
+		chunkRange:      500,
+		samplesPerChunk: DefaultSamplesPerChunk,
+	}

	lbls := labels.Labels{}
-	s := newMemSeries(lbls, 1, labels.StableHash(lbls), 0, defaultIsolationDisabled, DefaultSamplesPerChunk)
+	s := newMemSeries(lbls, 1, labels.StableHash(lbls), 0, defaultIsolationDisabled)

	for i := 0; i < 7; i++ {
-		ok, _ := s.append(int64(i), float64(i), 0, chunkDiskMapper, chunkRange)
+		ok, _ := s.append(int64(i), float64(i), 0, cOpts)
		require.True(t, ok, "sample append failed")
	}
@@ -566,7 +566,11 @@ func (wp *walSubsetProcessor) processWALSamples(h *Head, mmappedChunks, oooMmapp

	minValidTime := h.minValidTime.Load()
	mint, maxt := int64(math.MaxInt64), int64(math.MinInt64)
-	chunkRange := h.chunkRange.Load()
+	appendChunkOpts := chunkOpts{
+		chunkDiskMapper: h.chunkDiskMapper,
+		chunkRange:      h.chunkRange.Load(),
+		samplesPerChunk: h.opts.SamplesPerChunk,
+	}

	for in := range wp.input {
		if in.existingSeries != nil {

@@ -590,7 +594,7 @@ func (wp *walSubsetProcessor) processWALSamples(h *Head, mmappedChunks, oooMmapp
			if s.T <= ms.mmMaxTime {
				continue
			}
-			if _, chunkCreated := ms.append(s.T, s.V, 0, h.chunkDiskMapper, chunkRange); chunkCreated {
+			if _, chunkCreated := ms.append(s.T, s.V, 0, appendChunkOpts); chunkCreated {
				h.metrics.chunksCreated.Inc()
				h.metrics.chunks.Inc()
			}

@@ -620,9 +624,9 @@ func (wp *walSubsetProcessor) processWALSamples(h *Head, mmappedChunks, oooMmapp
			}
			var chunkCreated bool
			if s.h != nil {
-				_, chunkCreated = ms.appendHistogram(s.t, s.h, 0, h.chunkDiskMapper, chunkRange)
+				_, chunkCreated = ms.appendHistogram(s.t, s.h, 0, appendChunkOpts)
			} else {
-				_, chunkCreated = ms.appendFloatHistogram(s.t, s.fh, 0, h.chunkDiskMapper, chunkRange)
+				_, chunkCreated = ms.appendFloatHistogram(s.t, s.fh, 0, appendChunkOpts)
			}
			if chunkCreated {
				h.metrics.chunksCreated.Inc()
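All of the hunks above replace the separate (chunkDiskMapper, chunkRange) arguments with a single options value. The struct definition itself is not part of this excerpt; a plausible sketch, inferred from the field names and values used in the hunks (the pointer type for the disk mapper is an assumption):

type chunkOpts struct {
	chunkDiskMapper *chunks.ChunkDiskMapper // where cut chunks are m-mapped to disk
	chunkRange      int64                   // max time range one chunk may cover, in ms
	samplesPerChunk int                     // target number of samples per chunk
}

Bundling the three values keeps every append/appendHistogram call site stable the next time a chunk-level knob (such as samplesPerChunk here) is added.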
@@ -244,9 +244,9 @@ type txRing struct {
	txIDCount int // How many ids in the ring.
}

-func newTxRing(cap int) *txRing {
+func newTxRing(capacity int) *txRing {
	return &txRing{
-		txIDs: make([]uint64, cap),
+		txIDs: make([]uint64, capacity),
	}
}
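The rename from cap to capacity is more than cosmetic: cap is a predeclared Go identifier, and using it as a parameter name shadows the builtin inside the function body. A small self-contained illustration (hypothetical names, not from the patch):

package main

// shadowed uses cap as a parameter name: inside this function the
// builtin cap() is inaccessible, because cap now denotes an int.
func shadowed(cap int) []uint64 {
	return make([]uint64, cap)
}

// unshadowed keeps the builtin usable.
func unshadowed(capacity int) []uint64 {
	ids := make([]uint64, capacity)
	_ = cap(ids) // cap still refers to the builtin slice-capacity function here
	return ids
}

func main() {
	_ = shadowed(4)
	_ = unshadowed(4)
}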
@@ -115,6 +115,7 @@ func benchmarkPostingsForMatchers(b *testing.B, ir IndexReader) {
	jXplus := labels.MustNewMatcher(labels.MatchRegexp, "j", "X.+")
	iCharSet := labels.MustNewMatcher(labels.MatchRegexp, "i", "1[0-9]")
	iAlternate := labels.MustNewMatcher(labels.MatchRegexp, "i", "(1|2|3|4|5|6|20|55)")
+	iNotAlternate := labels.MustNewMatcher(labels.MatchNotRegexp, "i", "(1|2|3|4|5|6|20|55)")
	iXYZ := labels.MustNewMatcher(labels.MatchRegexp, "i", "X|Y|Z")
	iNotXYZ := labels.MustNewMatcher(labels.MatchNotRegexp, "i", "X|Y|Z")
	cases := []struct {

@@ -134,6 +135,7 @@ func benchmarkPostingsForMatchers(b *testing.B, ir IndexReader) {
	{`j=~"XXX|YYY"`, []*labels.Matcher{jXXXYYY}},
	{`j=~"X.+"`, []*labels.Matcher{jXplus}},
	{`i=~"(1|2|3|4|5|6|20|55)"`, []*labels.Matcher{iAlternate}},
+	{`i!~"(1|2|3|4|5|6|20|55)"`, []*labels.Matcher{iNotAlternate}},
	{`i=~"X|Y|Z"`, []*labels.Matcher{iXYZ}},
	{`i!~"X|Y|Z"`, []*labels.Matcher{iNotXYZ}},
	{`i=~".*"`, []*labels.Matcher{iStar}},

@@ -163,6 +165,8 @@ func benchmarkPostingsForMatchers(b *testing.B, ir IndexReader) {

	for _, c := range cases {
		b.Run(c.name, func(b *testing.B) {
+			b.ReportAllocs()
+			b.ResetTimer()
			for i := 0; i < b.N; i++ {
				_, err := PostingsForMatchers(ir, c.matchers...)
				require.NoError(b, err)

@@ -2070,6 +2070,12 @@ func TestPostingsForMatchers(t *testing.T) {
			labels.FromStrings("n", "2"),
		},
	},
+	{
+		matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchNotRegexp, "n", "(1|2.5)")},
+		exp: []labels.Labels{
+			labels.FromStrings("n", "2"),
+		},
+	},
	{
		matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "n", "1"), labels.MustNewMatcher(labels.MatchNotRegexp, "i", "^a$")},
		exp: []labels.Labels{

@@ -2131,6 +2137,13 @@ func TestPostingsForMatchers(t *testing.T) {
			labels.FromStrings("n", "1", "i", "b"),
		},
	},
+	{
+		matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchRegexp, "i", "(a|b)")},
+		exp: []labels.Labels{
+			labels.FromStrings("n", "1", "i", "a"),
+			labels.FromStrings("n", "1", "i", "b"),
+		},
+	},
	{
		matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchRegexp, "n", "x1|2")},
		exp: []labels.Labels{

@@ -2153,6 +2166,14 @@ func TestPostingsForMatchers(t *testing.T) {
			labels.FromStrings("n", "2.5"),
		},
	},
+	{
+		matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchRegexp, "i", "(c||d)")},
+		exp: []labels.Labels{
+			labels.FromStrings("n", "1"),
+			labels.FromStrings("n", "2"),
+			labels.FromStrings("n", "2.5"),
+		},
+	},
	}

	ir, err := h.Index()
util/fmtutil/format.go (new file, 203 lines)
@@ -0,0 +1,203 @@
// Copyright 2023 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package fmtutil

import (
	"errors"
	"fmt"
	"io"
	"sort"
	"time"

	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
	"github.com/prometheus/common/model"

	"github.com/prometheus/prometheus/prompb"
)

const (
	sumStr    = "_sum"
	countStr  = "_count"
	bucketStr = "_bucket"
)

var MetricMetadataTypeValue = map[string]int32{
	"UNKNOWN":        0,
	"COUNTER":        1,
	"GAUGE":          2,
	"HISTOGRAM":      3,
	"GAUGEHISTOGRAM": 4,
	"SUMMARY":        5,
	"INFO":           6,
	"STATESET":       7,
}

// MetricTextToWriteRequest consumes an io.Reader and return the data in write request format.
func MetricTextToWriteRequest(input io.Reader, labels map[string]string) (*prompb.WriteRequest, error) {
	var parser expfmt.TextParser
	mf, err := parser.TextToMetricFamilies(input)
	if err != nil {
		return nil, err
	}
	return MetricFamiliesToWriteRequest(mf, labels)
}

// MetricFamiliesToWriteRequest convert metric family to a writerequest.
func MetricFamiliesToWriteRequest(mf map[string]*dto.MetricFamily, extraLabels map[string]string) (*prompb.WriteRequest, error) {
	wr := &prompb.WriteRequest{}

	// build metric list
	sortedMetricNames := make([]string, 0, len(mf))
	for metric := range mf {
		sortedMetricNames = append(sortedMetricNames, metric)
	}
	// sort metrics name in lexicographical order
	sort.Strings(sortedMetricNames)

	for _, metricName := range sortedMetricNames {
		// Set metadata writerequest
		mtype := MetricMetadataTypeValue[mf[metricName].Type.String()]
		metadata := prompb.MetricMetadata{
			MetricFamilyName: mf[metricName].GetName(),
			Type:             prompb.MetricMetadata_MetricType(mtype),
			Help:             mf[metricName].GetHelp(),
		}
		wr.Metadata = append(wr.Metadata, metadata)

		for _, metric := range mf[metricName].Metric {
			labels := makeLabelsMap(metric, metricName, extraLabels)
			if err := makeTimeseries(wr, labels, metric); err != nil {
				return wr, err
			}
		}
	}
	return wr, nil
}

func toTimeseries(wr *prompb.WriteRequest, labels map[string]string, timestamp int64, value float64) {
	var ts prompb.TimeSeries
	ts.Labels = makeLabels(labels)
	ts.Samples = []prompb.Sample{
		{
			Timestamp: timestamp,
			Value:     value,
		},
	}
	wr.Timeseries = append(wr.Timeseries, ts)
}

func makeTimeseries(wr *prompb.WriteRequest, labels map[string]string, m *dto.Metric) error {
	var err error

	timestamp := m.GetTimestampMs()
	if timestamp == 0 {
		timestamp = time.Now().UnixNano() / int64(time.Millisecond)
	}

	switch {
	case m.Gauge != nil:
		toTimeseries(wr, labels, timestamp, m.GetGauge().GetValue())
	case m.Counter != nil:
		toTimeseries(wr, labels, timestamp, m.GetCounter().GetValue())
	case m.Summary != nil:
		metricName := labels[model.MetricNameLabel]
		// Preserve metric name order with first quantile labels timeseries then sum suffix timeserie and finally count suffix timeserie
		// Add Summary quantile timeseries
		quantileLabels := make(map[string]string, len(labels)+1)
		for key, value := range labels {
			quantileLabels[key] = value
		}

		for _, q := range m.GetSummary().Quantile {
			quantileLabels[model.QuantileLabel] = fmt.Sprint(q.GetQuantile())
			toTimeseries(wr, quantileLabels, timestamp, q.GetValue())
		}
		// Overwrite label model.MetricNameLabel for count and sum metrics
		// Add Summary sum timeserie
		labels[model.MetricNameLabel] = metricName + sumStr
		toTimeseries(wr, labels, timestamp, m.GetSummary().GetSampleSum())
		// Add Summary count timeserie
		labels[model.MetricNameLabel] = metricName + countStr
		toTimeseries(wr, labels, timestamp, float64(m.GetSummary().GetSampleCount()))

	case m.Histogram != nil:
		metricName := labels[model.MetricNameLabel]
		// Preserve metric name order with first bucket suffix timeseries then sum suffix timeserie and finally count suffix timeserie
		// Add Histogram bucket timeseries
		bucketLabels := make(map[string]string, len(labels)+1)
		for key, value := range labels {
			bucketLabels[key] = value
		}
		for _, b := range m.GetHistogram().Bucket {
			bucketLabels[model.MetricNameLabel] = metricName + bucketStr
			bucketLabels[model.BucketLabel] = fmt.Sprint(b.GetUpperBound())
			toTimeseries(wr, bucketLabels, timestamp, float64(b.GetCumulativeCount()))
		}
		// Overwrite label model.MetricNameLabel for count and sum metrics
		// Add Histogram sum timeserie
		labels[model.MetricNameLabel] = metricName + sumStr
		toTimeseries(wr, labels, timestamp, m.GetHistogram().GetSampleSum())
		// Add Histogram count timeserie
		labels[model.MetricNameLabel] = metricName + countStr
		toTimeseries(wr, labels, timestamp, float64(m.GetHistogram().GetSampleCount()))

	case m.Untyped != nil:
		toTimeseries(wr, labels, timestamp, m.GetUntyped().GetValue())
	default:
		err = errors.New("unsupported metric type")
	}
	return err
}

func makeLabels(labelsMap map[string]string) []prompb.Label {
	// build labels name list
	sortedLabelNames := make([]string, 0, len(labelsMap))
	for label := range labelsMap {
		sortedLabelNames = append(sortedLabelNames, label)
	}
	// sort labels name in lexicographical order
	sort.Strings(sortedLabelNames)

	var labels []prompb.Label
	for _, label := range sortedLabelNames {
		labels = append(labels, prompb.Label{
			Name:  label,
			Value: labelsMap[label],
		})
	}
	return labels
}

func makeLabelsMap(m *dto.Metric, metricName string, extraLabels map[string]string) map[string]string {
	// build labels map
	labels := make(map[string]string, len(m.Label)+len(extraLabels))
	labels[model.MetricNameLabel] = metricName

	// add extra labels
	for key, value := range extraLabels {
		labels[key] = value
	}

	// add metric labels
	for _, label := range m.Label {
		labelname := label.GetName()
		if labelname == model.JobLabel {
			labelname = fmt.Sprintf("%s%s", model.ExportedLabelPrefix, labelname)
		}
		labels[labelname] = label.GetValue()
	}

	return labels
}
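A quick usage sketch for the new package, calling exactly the signature added above; the input file name and label value are made up for the example:

package main

import (
	"fmt"
	"os"

	"github.com/prometheus/prometheus/util/fmtutil"
)

func main() {
	f, err := os.Open("metrics.txt") // hypothetical file in Prometheus text exposition format
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// Parse the text-format metrics and turn them into a remote-write
	// request, attaching a job label to every resulting series.
	wr, err := fmtutil.MetricTextToWriteRequest(f, map[string]string{"job": "promtool"})
	if err != nil {
		panic(err)
	}
	fmt.Printf("%d series, %d metadata entries\n", len(wr.Timeseries), len(wr.Metadata))
}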
util/fmtutil/format_test.go (new file, 233 lines)
@@ -0,0 +1,233 @@
// Copyright 2023 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package fmtutil

import (
	"bytes"
	"testing"

	"github.com/stretchr/testify/require"

	"github.com/prometheus/prometheus/prompb"
)

var writeRequestFixture = &prompb.WriteRequest{
	Metadata: []prompb.MetricMetadata{
		{
			MetricFamilyName: "http_request_duration_seconds",
			Type:             3,
			Help:             "A histogram of the request duration.",
		},
		{
			MetricFamilyName: "http_requests_total",
			Type:             1,
			Help:             "The total number of HTTP requests.",
		},
		{
			MetricFamilyName: "rpc_duration_seconds",
			Type:             5,
			Help:             "A summary of the RPC duration in seconds.",
		},
		{
			MetricFamilyName: "test_metric1",
			Type:             2,
			Help:             "This is a test metric.",
		},
	},
	Timeseries: []prompb.TimeSeries{
		{
			Labels: []prompb.Label{
				{Name: "__name__", Value: "http_request_duration_seconds_bucket"},
				{Name: "job", Value: "promtool"},
				{Name: "le", Value: "0.1"},
			},
			Samples: []prompb.Sample{{Value: 33444, Timestamp: 1}},
		},
		{
			Labels: []prompb.Label{
				{Name: "__name__", Value: "http_request_duration_seconds_bucket"},
				{Name: "job", Value: "promtool"},
				{Name: "le", Value: "0.5"},
			},
			Samples: []prompb.Sample{{Value: 129389, Timestamp: 1}},
		},
		{
			Labels: []prompb.Label{
				{Name: "__name__", Value: "http_request_duration_seconds_bucket"},
				{Name: "job", Value: "promtool"},
				{Name: "le", Value: "1"},
			},
			Samples: []prompb.Sample{{Value: 133988, Timestamp: 1}},
		},
		{
			Labels: []prompb.Label{
				{Name: "__name__", Value: "http_request_duration_seconds_bucket"},
				{Name: "job", Value: "promtool"},
				{Name: "le", Value: "+Inf"},
			},
			Samples: []prompb.Sample{{Value: 144320, Timestamp: 1}},
		},
		{
			Labels: []prompb.Label{
				{Name: "__name__", Value: "http_request_duration_seconds_sum"},
				{Name: "job", Value: "promtool"},
			},
			Samples: []prompb.Sample{{Value: 53423, Timestamp: 1}},
		},
		{
			Labels: []prompb.Label{
				{Name: "__name__", Value: "http_request_duration_seconds_count"},
				{Name: "job", Value: "promtool"},
			},
			Samples: []prompb.Sample{{Value: 144320, Timestamp: 1}},
		},
		{
			Labels: []prompb.Label{
				{Name: "__name__", Value: "http_requests_total"},
				{Name: "code", Value: "200"},
				{Name: "job", Value: "promtool"},
				{Name: "method", Value: "post"},
			},
			Samples: []prompb.Sample{{Value: 1027, Timestamp: 1395066363000}},
		},
		{
			Labels: []prompb.Label{
				{Name: "__name__", Value: "http_requests_total"},
				{Name: "code", Value: "400"},
				{Name: "job", Value: "promtool"},
				{Name: "method", Value: "post"},
			},
			Samples: []prompb.Sample{{Value: 3, Timestamp: 1395066363000}},
		},
		{
			Labels: []prompb.Label{
				{Name: "__name__", Value: "rpc_duration_seconds"},
				{Name: "job", Value: "promtool"},
				{Name: "quantile", Value: "0.01"},
			},
			Samples: []prompb.Sample{{Value: 3102, Timestamp: 1}},
		},
		{
			Labels: []prompb.Label{
				{Name: "__name__", Value: "rpc_duration_seconds"},
				{Name: "job", Value: "promtool"},
				{Name: "quantile", Value: "0.5"},
			},
			Samples: []prompb.Sample{{Value: 4773, Timestamp: 1}},
		},
		{
			Labels: []prompb.Label{
				{Name: "__name__", Value: "rpc_duration_seconds"},
				{Name: "job", Value: "promtool"},
				{Name: "quantile", Value: "0.99"},
			},
			Samples: []prompb.Sample{{Value: 76656, Timestamp: 1}},
		},
		{
			Labels: []prompb.Label{
				{Name: "__name__", Value: "rpc_duration_seconds_sum"},
				{Name: "job", Value: "promtool"},
			},
			Samples: []prompb.Sample{{Value: 1.7560473e+07, Timestamp: 1}},
		},
		{
			Labels: []prompb.Label{
				{Name: "__name__", Value: "rpc_duration_seconds_count"},
				{Name: "job", Value: "promtool"},
			},
			Samples: []prompb.Sample{{Value: 2693, Timestamp: 1}},
		},
		{
			Labels: []prompb.Label{
				{Name: "__name__", Value: "test_metric1"},
				{Name: "b", Value: "c"},
				{Name: "baz", Value: "qux"},
				{Name: "d", Value: "e"},
				{Name: "foo", Value: "bar"},
				{Name: "job", Value: "promtool"},
			},
			Samples: []prompb.Sample{{Value: 1, Timestamp: 1}},
		},
		{
			Labels: []prompb.Label{
				{Name: "__name__", Value: "test_metric1"},
				{Name: "b", Value: "c"},
				{Name: "baz", Value: "qux"},
				{Name: "d", Value: "e"},
				{Name: "foo", Value: "bar"},
				{Name: "job", Value: "promtool"},
			},
			Samples: []prompb.Sample{{Value: 2, Timestamp: 1}},
		},
	},
}

func TestParseAndPushMetricsTextAndFormat(t *testing.T) {
	input := bytes.NewReader([]byte(`
# HELP http_request_duration_seconds A histogram of the request duration.
# TYPE http_request_duration_seconds histogram
http_request_duration_seconds_bucket{le="0.1"} 33444 1
http_request_duration_seconds_bucket{le="0.5"} 129389 1
http_request_duration_seconds_bucket{le="1"} 133988 1
http_request_duration_seconds_bucket{le="+Inf"} 144320 1
http_request_duration_seconds_sum 53423 1
http_request_duration_seconds_count 144320 1
# HELP http_requests_total The total number of HTTP requests.
# TYPE http_requests_total counter
http_requests_total{method="post",code="200"} 1027 1395066363000
http_requests_total{method="post",code="400"} 3 1395066363000
# HELP rpc_duration_seconds A summary of the RPC duration in seconds.
# TYPE rpc_duration_seconds summary
rpc_duration_seconds{quantile="0.01"} 3102 1
rpc_duration_seconds{quantile="0.5"} 4773 1
rpc_duration_seconds{quantile="0.99"} 76656 1
rpc_duration_seconds_sum 1.7560473e+07 1
rpc_duration_seconds_count 2693 1
# HELP test_metric1 This is a test metric.
# TYPE test_metric1 gauge
test_metric1{b="c",baz="qux",d="e",foo="bar"} 1 1
test_metric1{b="c",baz="qux",d="e",foo="bar"} 2 1
`))
	labels := map[string]string{"job": "promtool"}

	expected, err := MetricTextToWriteRequest(input, labels)
	require.NoError(t, err)

	require.Equal(t, writeRequestFixture, expected)
}

func TestMetricTextToWriteRequestErrorParsingFloatValue(t *testing.T) {
	input := bytes.NewReader([]byte(`
# HELP http_requests_total The total number of HTTP requests.
# TYPE http_requests_total counter
http_requests_total{method="post",code="200"} 1027Error 1395066363000
http_requests_total{method="post",code="400"} 3 1395066363000
`))
	labels := map[string]string{"job": "promtool"}

	_, err := MetricTextToWriteRequest(input, labels)
	require.Equal(t, err.Error(), "text format parsing error in line 4: expected float as value, got \"1027Error\"")
}

func TestMetricTextToWriteRequestErrorParsingMetricType(t *testing.T) {
	input := bytes.NewReader([]byte(`
# HELP node_info node info summary.
# TYPE node_info info
node_info{test="summary"} 1 1395066363000
`))
	labels := map[string]string{"job": "promtool"}

	_, err := MetricTextToWriteRequest(input, labels)
	require.Equal(t, err.Error(), "text format parsing error in line 3: unknown metric type \"info\"")
}
@@ -18,16 +18,18 @@ package runtime

import (
	"fmt"
+	"math"
	"syscall"
)

-// syscall.RLIM_INFINITY is a constant and its default type is int.
-// It needs to be converted to an int64 variable to be compared with uint64 values.
-// See https://golang.org/ref/spec#Conversions
-var unlimited int64 = syscall.RLIM_INFINITY
+// syscall.RLIM_INFINITY is a constant.
+// Its type is int on most architectures but there are exceptions such as loong64.
+// Uniform it to uint according to the standard.
+// https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/sys_resource.h.html
+var unlimited uint64 = syscall.RLIM_INFINITY & math.MaxUint64

func limitToString(v uint64, unit string) string {
-	if v == uint64(unlimited) {
+	if v == unlimited {
		return "unlimited"
	}
	return fmt.Sprintf("%d%s", v, unit)
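The masking trick deserves a word: where the platform defines RLIM_INFINITY as an all-ones value, Go may see it as the untyped constant -1, which cannot be assigned to a uint64 directly. ANDing with math.MaxUint64 is evaluated at compile time with arbitrary precision and yields 2^64-1, which fits. A standalone sketch with a stand-in constant (not the real syscall value):

package main

import (
	"fmt"
	"math"
)

// Stand-in for the platform constant; on several architectures the
// kernel headers make RLIM_INFINITY look like -1 to Go.
const rlimInfinity = -1

func main() {
	// var bad uint64 = rlimInfinity // would not compile: constant -1 overflows uint64

	// Constant AND is computed with arbitrary precision, so this is 2^64-1.
	var unlimited uint64 = rlimInfinity & math.MaxUint64
	fmt.Println(unlimited == math.MaxUint64) // true
}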
@@ -43,6 +43,6 @@ func TolerantVerifyLeak(m *testing.M) {
		// Ignore "ristretto" and its dependency "glog".
-		goleak.IgnoreTopFunction("github.com/dgraph-io/ristretto.(*defaultPolicy).processItems"),
+		goleak.IgnoreTopFunction("github.com/dgraph-io/ristretto.(*Cache).processItems"),
-		goleak.IgnoreTopFunction("github.com/golang/glog.(*loggingT).flushDaemon"),
+		goleak.IgnoreTopFunction("github.com/golang/glog.(*fileSink).flushDaemon"),
	)
}
@@ -1202,16 +1202,26 @@ func (api *API) metricMetadata(r *http.Request) apiFuncResult {
			return apiFuncResult{nil, &apiError{errorBadData, errors.New("limit must be a number")}, nil, nil}
		}
	}
+	limitPerMetric := -1
+	if s := r.FormValue("limit_per_metric"); s != "" {
+		var err error
+		if limitPerMetric, err = strconv.Atoi(s); err != nil {
+			return apiFuncResult{nil, &apiError{errorBadData, errors.New("limit_per_metric must be a number")}, nil, nil}
+		}
+	}

	metric := r.FormValue("metric")
	for _, tt := range api.targetRetriever(r.Context()).TargetsActive() {
		for _, t := range tt {

			if metric == "" {
				for _, mm := range t.MetadataList() {
					m := metadata{Type: mm.Type, Help: mm.Help, Unit: mm.Unit}
					ms, ok := metrics[mm.Metric]

+					if limitPerMetric > 0 && len(ms) >= limitPerMetric {
+						continue
+					}

					if !ok {
						ms = map[metadata]struct{}{}
						metrics[mm.Metric] = ms

@@ -1225,6 +1235,10 @@ func (api *API) metricMetadata(r *http.Request) apiFuncResult {
				m := metadata{Type: md.Type, Help: md.Help, Unit: md.Unit}
				ms, ok := metrics[md.Metric]

+				if limitPerMetric > 0 && len(ms) >= limitPerMetric {
+					continue
+				}

				if !ok {
					ms = map[metadata]struct{}{}
					metrics[md.Metric] = ms

@@ -1499,7 +1513,7 @@ func (api *API) serveTSDBStatus(r *http.Request) apiFuncResult {
	chunkCount := int64(math.NaN())
	for _, mF := range metrics {
		if *mF.Name == "prometheus_tsdb_head_chunks" {
-			m := *mF.Metric[0]
+			m := mF.Metric[0]
			if m.Gauge != nil {
				chunkCount = int64(m.Gauge.GetValue())
				break
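The new limit_per_metric parameter caps how many distinct metadata entries are kept per metric, alongside the existing limit on the number of metrics. A usage sketch against a local server (the endpoint path follows the standard Prometheus HTTP API; host and values are made up):

package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Ask for at most 10 metrics, keeping at most one metadata entry per metric.
	resp, err := http.Get("http://localhost:9090/api/v1/metadata?limit=10&limit_per_metric=1")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(body))
}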
@@ -1021,15 +1021,16 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E
	}

	type test struct {
-		endpoint    apiFunc
-		params      map[string]string
-		query       url.Values
-		response    interface{}
-		responseLen int
-		errType     errorType
-		sorter      func(interface{})
-		metadata    []targetMetadata
-		exemplars   []exemplar.QueryResult
+		endpoint              apiFunc
+		params                map[string]string
+		query                 url.Values
+		response              interface{}
+		responseLen           int
+		responseMetadataTotal int
+		errType               errorType
+		sorter                func(interface{})
+		metadata              []targetMetadata
+		exemplars             []exemplar.QueryResult
	}

	tests := []test{

@@ -1774,6 +1775,126 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E
		},
		responseLen: 2,
	},
+	// With a limit for the number of metadata per metric.
+	{
+		endpoint: api.metricMetadata,
+		query:    url.Values{"limit_per_metric": []string{"1"}},
+		metadata: []targetMetadata{
+			{
+				identifier: "test",
+				metadata: []scrape.MetricMetadata{
+					{
+						Metric: "go_threads",
+						Type:   textparse.MetricTypeGauge,
+						Help:   "Number of OS threads created",
+						Unit:   "",
+					},
+					{
+						Metric: "go_threads",
+						Type:   textparse.MetricTypeGauge,
+						Help:   "Repeated metadata",
+						Unit:   "",
+					},
+					{
+						Metric: "go_gc_duration_seconds",
+						Type:   textparse.MetricTypeSummary,
+						Help:   "A summary of the GC invocation durations.",
+						Unit:   "",
+					},
+				},
+			},
+		},
+		response: map[string][]metadata{
+			"go_threads": {
+				{textparse.MetricTypeGauge, "Number of OS threads created", ""},
+			},
+			"go_gc_duration_seconds": {
+				{textparse.MetricTypeSummary, "A summary of the GC invocation durations.", ""},
+			},
+		},
+	},
+	// With a limit for the number of metadata per metric and a limit on the number of metrics.
+	{
+		endpoint: api.metricMetadata,
+		query:    url.Values{"limit_per_metric": []string{"1"}, "limit": []string{"1"}},
+		metadata: []targetMetadata{
+			{
+				identifier: "test",
+				metadata: []scrape.MetricMetadata{
+					{
+						Metric: "go_threads",
+						Type:   textparse.MetricTypeGauge,
+						Help:   "Number of OS threads created",
+						Unit:   "",
+					},
+					{
+						Metric: "go_threads",
+						Type:   textparse.MetricTypeGauge,
+						Help:   "Repeated metadata",
+						Unit:   "",
+					},
+					{
+						Metric: "go_gc_duration_seconds",
+						Type:   textparse.MetricTypeSummary,
+						Help:   "A summary of the GC invocation durations.",
+						Unit:   "",
+					},
+				},
+			},
+		},
+		responseLen:           1,
+		responseMetadataTotal: 1,
+	},
+
+	// With a limit for the number of metadata per metric and a limit on the number of metrics, while having multiple targets.
+	{
+		endpoint: api.metricMetadata,
+		query:    url.Values{"limit_per_metric": []string{"1"}, "limit": []string{"1"}},
+		metadata: []targetMetadata{
+			{
+				identifier: "test",
+				metadata: []scrape.MetricMetadata{
+					{
+						Metric: "go_threads",
+						Type:   textparse.MetricTypeGauge,
+						Help:   "Number of OS threads created",
+						Unit:   "",
+					},
+					{
+						Metric: "go_threads",
+						Type:   textparse.MetricTypeGauge,
+						Help:   "Repeated metadata",
+						Unit:   "",
+					},
+					{
+						Metric: "go_gc_duration_seconds",
+						Type:   textparse.MetricTypeSummary,
+						Help:   "A summary of the GC invocation durations.",
+						Unit:   "",
+					},
+				},
+			},
+			{
+				identifier: "secondTarget",
+				metadata: []scrape.MetricMetadata{
+					{
+						Metric: "go_threads",
+						Type:   textparse.MetricTypeGauge,
+						Help:   "Number of OS threads created, but from a different target",
+						Unit:   "",
+					},
+					{
+						Metric: "go_gc_duration_seconds",
+						Type:   textparse.MetricTypeSummary,
+						Help:   "A summary of the GC invocation durations, but from a different target.",
+						Unit:   "",
+					},
+				},
+			},
+		},
+		responseLen:           1,
+		responseMetadataTotal: 1,
+	},
	// When requesting a specific metric that is present.
	{
		endpoint: api.metricMetadata,

@@ -2563,6 +2684,9 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E

	if test.responseLen != 0 {
		assertAPIResponseLength(t, res.data, test.responseLen)
+		if test.responseMetadataTotal != 0 {
+			assertAPIResponseMetadataLen(t, res.data, test.responseMetadataTotal)
+		}
	} else {
		assertAPIResponse(t, res.data, test.response)
	}

@@ -2613,6 +2737,24 @@ func assertAPIResponseLength(t *testing.T, got interface{}, expLen int) {
	}
}

+func assertAPIResponseMetadataLen(t *testing.T, got interface{}, expLen int) {
+	t.Helper()
+
+	var gotLen int
+	response := got.(map[string][]metadata)
+	for _, m := range response {
+		gotLen += len(m)
+	}
+
+	if gotLen != expLen {
+		t.Fatalf(
+			"Amount of metadata in the response does not match, expected:\n%d\ngot:\n%d",
+			expLen,
+			gotLen,
+		)
+	}
+}

type fakeDB struct {
	err error
}
@@ -1,7 +1,7 @@
CodeMirror-promql
=================

-This project provides a mode for [CodeMirror Next](https://codemirror.net/6) that handles syntax highlighting, linting
+This project provides a mode for [CodeMirror](https://codemirror.net/6/) that handles syntax highlighting, linting
and autocompletion for PromQL ([Prometheus Query Language](https://prometheus.io/docs/introduction/overview/)).

![...](...)

@@ -15,7 +15,7 @@ npm install --save @prometheus-io/codemirror-promql
```

**Note:** You will have to manually install different packages that are part
-of [CodeMirror Next](https://codemirror.net/6), as they are a peer dependency to this package. Here are the different
+of [CodeMirror](https://codemirror.net/6/), as they are a peer dependency to this package. Here are the different
packages you need to install:

* **@codemirror/autocomplete**
@@ -1,6 +1,6 @@
{
  "name": "@prometheus-io/codemirror-promql",
-  "version": "0.44.0",
+  "version": "0.45.0",
  "description": "a CodeMirror mode for the PromQL language",
  "types": "dist/esm/index.d.ts",
  "module": "dist/esm/index.js",

@@ -29,21 +29,21 @@
  },
  "homepage": "https://github.com/prometheus/prometheus/blob/main/web/ui/module/codemirror-promql/README.md",
  "dependencies": {
-    "@prometheus-io/lezer-promql": "0.44.0",
+    "@prometheus-io/lezer-promql": "0.45.0",
    "lru-cache": "^6.0.0"
  },
  "devDependencies": {
-    "@codemirror/autocomplete": "^6.4.0",
-    "@codemirror/language": "^6.4.0",
-    "@codemirror/lint": "^6.1.0",
-    "@codemirror/state": "^6.2.0",
-    "@codemirror/view": "^6.7.3",
-    "@lezer/common": "^1.0.2",
-    "@lezer/lr": "^1.3.1",
-    "@lezer/highlight": "^1.1.3",
+    "@codemirror/autocomplete": "^6.7.1",
+    "@codemirror/language": "^6.7.0",
+    "@codemirror/lint": "^6.2.2",
+    "@codemirror/state": "^6.2.1",
+    "@codemirror/view": "^6.13.0",
+    "@lezer/common": "^1.0.3",
+    "@lezer/lr": "^1.3.6",
+    "@lezer/highlight": "^1.1.6",
    "@types/lru-cache": "^5.1.1",
    "isomorphic-fetch": "^3.0.0",
-    "nock": "^13.3.0"
+    "nock": "^13.3.1"
  },
  "peerDependencies": {
    "@codemirror/autocomplete": "^6.4.0",

@@ -1,6 +1,6 @@
{
  "name": "@prometheus-io/lezer-promql",
-  "version": "0.44.0",
+  "version": "0.45.0",
  "description": "lezer-based PromQL grammar",
  "main": "dist/index.cjs",
  "type": "module",

@@ -30,9 +30,9 @@
    "test": "NODE_OPTIONS=--experimental-vm-modules jest"
  },
  "devDependencies": {
-    "@lezer/generator": "^1.2.2",
-    "@lezer/lr": "^1.3.1",
-    "@lezer/highlight": "^1.1.3"
+    "@lezer/generator": "^1.2.3",
+    "@lezer/lr": "^1.3.6",
+    "@lezer/highlight": "^1.1.6"
  },
  "peerDependencies": {
    "@lezer/lr": "^1.2.3",

web/ui/package-lock.json (generated, 606 lines): file diff suppressed because it is too large.
@@ -16,16 +16,16 @@
    "npm": ">=7.0.0"
  },
  "devDependencies": {
-    "@types/jest": "^29.4.0",
+    "@types/jest": "^29.5.2",
    "@types/node": "^17.0.45",
-    "eslint-config-prettier": "^8.6.0",
+    "eslint-config-prettier": "^8.8.0",
    "eslint-config-react-app": "^7.0.1",
    "eslint-plugin-prettier": "^4.2.1",
-    "jest-canvas-mock": "^2.4.0",
+    "jest-canvas-mock": "^2.5.1",
    "jest-fetch-mock": "^3.0.3",
    "react-scripts": "^5.0.1",
-    "prettier": "^2.8.3",
-    "ts-jest": "^29.0.5",
-    "typescript": "^4.9.4"
+    "prettier": "^2.8.8",
+    "ts-jest": "^29.1.0",
+    "typescript": "^4.9.5"
  }
}

@@ -1,33 +1,33 @@
{
  "name": "@prometheus-io/app",
-  "version": "0.44.0",
+  "version": "0.45.0",
  "private": true,
  "dependencies": {
-    "@codemirror/autocomplete": "^6.4.0",
-    "@codemirror/commands": "^6.2.0",
-    "@codemirror/language": "^6.4.0",
-    "@codemirror/lint": "^6.1.0",
-    "@codemirror/search": "^6.2.3",
-    "@codemirror/state": "^6.2.0",
-    "@codemirror/view": "^6.7.3",
+    "@codemirror/autocomplete": "^6.7.1",
+    "@codemirror/commands": "^6.2.4",
+    "@codemirror/language": "^6.7.0",
+    "@codemirror/lint": "^6.2.2",
+    "@codemirror/search": "^6.5.0",
+    "@codemirror/state": "^6.2.1",
+    "@codemirror/view": "^6.13.0",
    "@forevolve/bootstrap-dark": "^2.1.1",
-    "@fortawesome/fontawesome-svg-core": "6.2.1",
-    "@fortawesome/free-solid-svg-icons": "6.2.1",
+    "@fortawesome/fontawesome-svg-core": "6.4.0",
+    "@fortawesome/free-solid-svg-icons": "6.4.0",
    "@fortawesome/react-fontawesome": "0.2.0",
-    "@lezer/lr": "^1.3.1",
-    "@lezer/highlight": "^1.1.3",
-    "@lezer/common": "^1.0.2",
+    "@lezer/lr": "^1.3.6",
+    "@lezer/highlight": "^1.1.6",
+    "@lezer/common": "^1.0.3",
    "@nexucis/fuzzy": "^0.4.1",
    "@nexucis/kvsearch": "^0.8.1",
-    "@prometheus-io/codemirror-promql": "0.44.0",
+    "@prometheus-io/codemirror-promql": "0.45.0",
    "bootstrap": "^4.6.2",
    "css.escape": "^1.5.1",
-    "downshift": "^7.2.0",
+    "downshift": "^7.6.0",
    "http-proxy-middleware": "^2.0.6",
-    "jquery": "^3.6.3",
+    "jquery": "^3.7.0",
    "jquery.flot.tooltip": "^0.9.0",
    "moment": "^2.29.4",
-    "moment-timezone": "^0.5.40",
+    "moment-timezone": "^0.5.43",
    "popper.js": "^1.14.3",
    "react": "^17.0.2",
    "react-copy-to-clipboard": "^5.1.0",

@@ -37,8 +37,8 @@
    "react-router-dom": "^5.3.4",
    "react-test-renderer": "^17.0.2",
    "reactstrap": "^8.10.1",
-    "sanitize-html": "^2.8.1",
-    "sass": "1.57.1",
+    "sanitize-html": "^2.10.0",
+    "sass": "1.62.1",
    "tempusdominus-bootstrap-4": "^5.39.2",
    "tempusdominus-core": "^5.19.3"
  },

@@ -66,15 +66,15 @@
  ],
  "devDependencies": {
    "@testing-library/react-hooks": "^7.0.2",
-    "@types/enzyme": "^3.10.12",
+    "@types/enzyme": "^3.10.13",
    "@types/flot": "0.0.32",
    "@types/jquery": "^3.5.16",
-    "@types/react": "^17.0.53",
+    "@types/react": "^17.0.60",
    "@types/react-copy-to-clipboard": "^5.0.4",
-    "@types/react-dom": "^17.0.18",
+    "@types/react-dom": "^17.0.20",
    "@types/react-router-dom": "^5.3.3",
-    "@types/sanitize-html": "^2.8.0",
-    "@types/sinon": "^10.0.13",
+    "@types/sanitize-html": "^2.9.0",
+    "@types/sinon": "^10.0.15",
    "@wojtekmaj/enzyme-adapter-react-17": "^0.8.0",
    "enzyme": "^3.11.0",
    "enzyme-to-json": "^3.6.2",

@@ -42,7 +42,7 @@
  -->
  <!--
    The TITLE_PLACEHOLDER magic value is replaced during serving by Prometheus.
-    We need it dynamic because it can be overriden by the command line flag `web.page-title`.
+    We need it dynamic because it can be overridden by the command line flag `web.page-title`.
  -->
  <title>TITLE_PLACEHOLDER</title>
</head>

@@ -9,7 +9,7 @@ export interface ThemeCtx {
  setTheme: (t: themeSetting) => void;
}

-// defaults, will be overriden in App.tsx
+// defaults, will be overridden in App.tsx
export const ThemeContext = React.createContext<ThemeCtx>({
  theme: 'light',
  userPreference: 'auto',

@@ -158,6 +158,7 @@ func (m *metrics) instrumentHandlerWithPrefix(prefix string) func(handlerName st
}

func (m *metrics) instrumentHandler(handlerName string, handler http.HandlerFunc) http.HandlerFunc {
+	m.requestCounter.WithLabelValues(handlerName, "200")
	return promhttp.InstrumentHandlerCounter(
		m.requestCounter.MustCurryWith(prometheus.Labels{"handler": handlerName}),
		promhttp.InstrumentHandlerDuration(

@@ -744,7 +745,7 @@ func (h *Handler) runtimeInfo() (api_v1.RuntimeInfo, error) {
}

func toFloat64(f *io_prometheus_client.MetricFamily) float64 {
-	m := *f.Metric[0]
+	m := f.Metric[0]
	if m.Gauge != nil {
		return m.Gauge.GetValue()
	}
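One subtlety in the toFloat64 hunk here and the matching serveTSDBStatus hunk above: `m := *f.Metric[0]` dereferences and therefore copies the generated protobuf struct, while `m := f.Metric[0]` keeps the pointer. Generated messages carry a do-not-copy marker that go vet's copylocks check flags, which is the likely motivation; functionally both forms read the same nested value. A small illustration with hypothetical stand-in types (not the real generated code):

package main

import "fmt"

// Stand-ins for the generated types: the Gauge is reached through a
// pointer, so a shallow copy of Metric and a pointer to Metric both
// observe the same Gauge value.
type Gauge struct{ value float64 }

func (g *Gauge) GetValue() float64 {
	if g == nil {
		return 0
	}
	return g.value
}

type Metric struct{ Gauge *Gauge }

func main() {
	metrics := []*Metric{{Gauge: &Gauge{value: 42}}}

	byCopy := *metrics[0] // dereference: shallow-copies the struct
	byPtr := metrics[0]   // keeps the pointer: no copy

	fmt.Println(byCopy.Gauge.GetValue(), byPtr.Gauge.GetValue()) // 42 42
}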