mirror of https://github.com/prometheus/prometheus.git (synced 2024-11-14 17:44:06 -08:00)
commit ffdaf8e346
@@ -3,7 +3,7 @@ version: 2.1

 orbs:
   prometheus: prometheus/prometheus@0.11.0
-  go: circleci/go@0.2.0
+  go: circleci/go@1.7.0
   win: circleci/windows@2.3.0

 executors:
@@ -11,10 +11,10 @@ executors:
   # should also be updated.
   golang:
     docker:
-      - image: circleci/golang:1.16-node
+      - image: quay.io/prometheus/golang-builder:1.17-base
   golang_115:
     docker:
-      - image: circleci/golang:1.15-node
+      - image: quay.io/prometheus/golang-builder:1.15-base

 jobs:
   test_go:
@@ -24,8 +24,6 @@ jobs:
       - prometheus/setup_environment
       - go/load-cache:
           key: v1
-      - run:
-          command: sudo apt-get install -y yamllint
       - run:
           command: make GO_ONLY=1
           environment:
@@ -116,8 +114,6 @@ jobs:
       - run:
           command: jb install
           working_directory: ~/project/documentation/prometheus-mixin
-      - run:
-          command: sudo apt-get install -y yamllint
       - run:
           command: make
           working_directory: ~/project/documentation/prometheus-mixin
@@ -129,8 +125,6 @@ jobs:
     executor: golang
     steps:
      - checkout
-      - run: mkdir -v -p "${PATH%%:*}" && curl -sL --fail https://github.com/mikefarah/yq/releases/download/v4.6.3/yq_linux_amd64 -o "${PATH%%:*}/yq" && chmod -v +x "${PATH%%:*}/yq"
-      - run: sha256sum -c <(echo "c4343783c3361495c0d6d1eb742bba7432aa65e13e9fb8d7e201d544bcf14246 ${PATH%%:*}/yq")
      - run: ./scripts/sync_repo_files.sh

 workflows:
|
|
29  .github/workflows/golangci-lint.yml  vendored  Normal file
@@ -0,0 +1,29 @@
+name: golangci-lint
+on:
+  push:
+    paths:
+      - "go.sum"
+      - "go.mod"
+      - "**.go"
+      - "scripts/errcheck_excludes.txt"
+      - ".github/workflows/golangci-lint.yml"
+  pull_request:
+    paths:
+      - "go.sum"
+      - "go.mod"
+      - "**.go"
+      - "scripts/errcheck_excludes.txt"
+      - ".github/workflows/golangci-lint.yml"
+
+jobs:
+  golangci:
+    name: lint
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v2
+
+      - name: Lint
+        uses: golangci/golangci-lint-action@v2
+        with:
+          version: v1.42.0
11  .github/workflows/test.yml  vendored
@@ -16,7 +16,16 @@ jobs:
   test:
     runs-on: ubuntu-20.04
     steps:
+      - name: Upgrade golang
+        run: |
+          cd /tmp
+          wget https://dl.google.com/go/go1.16.8.linux-amd64.tar.gz
+          tar -zxvf go1.16.8.linux-amd64.tar.gz
+          sudo rm -fr /usr/local/go
+          sudo mv /tmp/go /usr/local/go
+          cd -
+          ls -l /usr/bin/go
       - name: Checkout Repo
         uses: actions/checkout@v2
       - name: Run Tests
-        run: make common-test
+        run: GO=/usr/local/go/bin/go make common-test
@@ -1,7 +1,7 @@
 go:
     # Whenever the Go version is updated here,
     # .circle/config.yml should also be updated.
-    version: 1.16
+    version: 1.17
 repository:
     path: github.com/prometheus/prometheus
 build:
@@ -24,3 +24,4 @@ rules:
   .github/workflows/funcbench.yml
   .github/workflows/fuzzing.yml
   .github/workflows/prombench.yml
+  .github/workflows/golangci-lint.yml
@@ -1,3 +1,8 @@
+## 2.29.2 / 2021-08-27
+
+* [BUGFIX] Fix Kubernetes SD failing to discover Ingress in Kubernetes v1.22. #9205
+* [BUGFIX] Fix data race in loading write-ahead-log (WAL). #9259
+
 ## 2.29.1 / 2021-08-11

 * [BUGFIX] tsdb: align atomically accessed int64 to prevent panic in 32-bit
@@ -52,7 +52,7 @@ All our issues are regularly tagged so that you can also filter down the issues

 * Commits should be as small as possible, while ensuring that each commit is correct independently (i.e., each commit should compile and pass tests).

-* If your patch is not getting reviewed or you need a specific person to review it, you can @-reply a reviewer asking for a review in the pull request or a comment, or you can ask for a review on IRC channel [#prometheus](https://web.libera.chat/?channels=#prometheus) on irc.libera.chat (for the easiest start, [join via Riot](https://riot.im/app/#/room/#prometheus:matrix.org)).
+* If your patch is not getting reviewed or you need a specific person to review it, you can @-reply a reviewer asking for a review in the pull request or a comment, or you can ask for a review on the IRC channel [#prometheus-dev](https://web.libera.chat/?channels=#prometheus-dev) on irc.libera.chat (for the easiest start, [join via Element](https://app.element.io/#/room/#prometheus-dev:matrix.org)).

 * Add tests relevant to the fixed bug or new feature.
@@ -64,10 +64,10 @@ To add or update a new dependency, use the `go get` command:

 ```bash
 # Pick the latest tagged release.
-go get example.com/some/module/pkg
+go install example.com/some/module/pkg@latest

 # Pick a specific version.
-go get example.com/some/module/pkg@vX.Y.Z
+go install example.com/some/module/pkg@vX.Y.Z
 ```

 Tidy up the `go.mod` and `go.sum` files:
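The commands completing the "Tidy up" step fall outside this hunk's context; a minimal sketch of what typically follows, assuming the standard Go toolchain:

```bash
# Drop unused requirements and add missing ones after the update above.
go mod tidy
```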
@@ -83,12 +83,18 @@ PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_

 GOLANGCI_LINT :=
 GOLANGCI_LINT_OPTS ?=
-GOLANGCI_LINT_VERSION ?= v1.39.0
+GOLANGCI_LINT_VERSION ?= v1.42.0
 # golangci-lint only supports linux, darwin and windows platforms on i386/amd64.
 # windows isn't included here because of the path separator being different.
 ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))
 	ifeq ($(GOHOSTARCH),$(filter $(GOHOSTARCH),amd64 i386))
-		GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint
+		# If we're in CI and there is an Actions file, that means the linter
+		# is being run in Actions, so we don't need to run it here.
+		ifeq (,$(CIRCLE_JOB))
+			GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint
+		else ifeq (,$(wildcard .github/workflows/golangci-lint.yml))
+			GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint
+		endif
 	endif
 endif
@@ -63,10 +63,10 @@ installed in order to build the frontend assets.
 You can directly use the `go` tool to download and install the `prometheus`
 and `promtool` binaries into your `GOPATH`:

-    $ GO111MODULE=on go get github.com/prometheus/prometheus/cmd/...
+    $ GO111MODULE=on go install github.com/prometheus/prometheus/cmd/...
     $ prometheus --config.file=your_config.yml

-*However*, when using `go get` to build Prometheus, Prometheus will expect to be able to
+*However*, when using `go install` to build Prometheus, Prometheus will expect to be able to
 read its web assets from local filesystem directories under `web/ui/static` and
 `web/ui/templates`. In order for these assets to be found, you will have to run Prometheus
 from the root of the cloned repository. Note also that these directories do not include the
@@ -107,6 +107,7 @@ type flagConfig struct {
 	outageTolerance model.Duration
 	resendDelay     model.Duration
 	web             web.Options
+	scrape          scrape.Options
 	tsdb            tsdbOptions
 	lookbackDelta   model.Duration
 	webTimeout      model.Duration
@@ -152,6 +153,9 @@ func (c *flagConfig) setFeatureListOptions(logger log.Logger) error {
 		case "memory-snapshot-on-shutdown":
 			c.tsdb.EnableMemorySnapshotOnShutdown = true
 			level.Info(logger).Log("msg", "Experimental memory snapshot on shutdown enabled")
+		case "extra-scrape-metrics":
+			c.scrape.ExtraMetrics = true
+			level.Info(logger).Log("msg", "Experimental additional scrape metrics")
 		case "":
 			continue
 		default:
@@ -291,9 +295,12 @@ func main() {
 	a.Flag("rules.alert.resend-delay", "Minimum amount of time to wait before resending an alert to Alertmanager.").
 		Default("1m").SetValue(&cfg.resendDelay)

-	a.Flag("scrape.adjust-timestamps", "Adjust scrape timestamps by up to 2ms to align them to the intended schedule. See https://github.com/prometheus/prometheus/issues/7846 for more context. Experimental. This flag will be removed in a future release.").
+	a.Flag("scrape.adjust-timestamps", "Adjust scrape timestamps by up to `scrape.timestamp-tolerance` to align them to the intended schedule. See https://github.com/prometheus/prometheus/issues/7846 for more context. Experimental. This flag will be removed in a future release.").
 		Hidden().Default("true").BoolVar(&scrape.AlignScrapeTimestamps)

+	a.Flag("scrape.timestamp-tolerance", "Timestamp tolerance. See https://github.com/prometheus/prometheus/issues/7846 for more context. Experimental. This flag will be removed in a future release.").
+		Hidden().Default("2ms").DurationVar(&scrape.ScrapeTimestampTolerance)
+
 	a.Flag("alertmanager.notification-queue-capacity", "The capacity of the queue for pending Alertmanager notifications.").
 		Default("10000").IntVar(&cfg.notifier.QueueCapacity)
@@ -312,7 +319,7 @@ func main() {
 	a.Flag("query.max-samples", "Maximum number of samples a single query can load into memory. Note that queries will fail if they try to load more samples than this into memory, so this also limits the number of samples a query can return.").
 		Default("50000000").IntVar(&cfg.queryMaxSamples)

-	a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-at-modifier, promql-negative-offset, remote-write-receiver. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details.").
+	a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-at-modifier, promql-negative-offset, remote-write-receiver, extra-scrape-metrics. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details.").
 		Default("").StringsVar(&cfg.featureList)

 	promlogflag.AddFlags(a, &cfg.promlogConfig)
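For reference, a hedged example of enabling the feature registered above (the flag name comes from the hunk; the config path is illustrative):

```bash
prometheus --enable-feature=extra-scrape-metrics --config.file=prometheus.yml
```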
@@ -457,7 +464,7 @@ func main() {
 		ctxNotify, cancelNotify = context.WithCancel(context.Background())
 		discoveryManagerNotify  = discovery.NewManager(ctxNotify, log.With(logger, "component", "discovery manager notify"), discovery.Name("notify"))

-		scrapeManager = scrape.NewManager(log.With(logger, "component", "scrape manager"), fanoutStorage)
+		scrapeManager = scrape.NewManager(&cfg.scrape, log.With(logger, "component", "scrape manager"), fanoutStorage)

 		opts = promql.EngineOpts{
 			Logger: log.With(logger, "component", "query engine"),
@@ -11,6 +11,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 //
+//go:build !windows
 // +build !windows

 package main
@@ -24,7 +24,7 @@ import (
 	"net/url"
 	"os"
 	"path/filepath"
-	"reflect"
+	"sort"
 	"strconv"
 	"strings"
 	"time"
@@ -48,6 +48,7 @@ import (
 	_ "github.com/prometheus/prometheus/discovery/install" // Register service discovery implementations.
 	"github.com/prometheus/prometheus/discovery/kubernetes"
 	"github.com/prometheus/prometheus/discovery/targetgroup"
+	"github.com/prometheus/prometheus/pkg/labels"
 	"github.com/prometheus/prometheus/pkg/rulefmt"
 	"github.com/prometheus/prometheus/promql"
 )
@@ -136,6 +137,7 @@ func main() {
 	analyzePath := tsdbAnalyzeCmd.Arg("db path", "Database path (default is "+defaultDBPath+").").Default(defaultDBPath).String()
 	analyzeBlockID := tsdbAnalyzeCmd.Arg("block id", "Block to analyze (default is the last block).").String()
 	analyzeLimit := tsdbAnalyzeCmd.Flag("limit", "How many items to show in each list.").Default("20").Int()
+	analyzeRunExtended := tsdbAnalyzeCmd.Flag("extended", "Run extended analysis.").Bool()

 	tsdbListCmd := tsdbCmd.Command("list", "List tsdb blocks.")
 	listHumanReadable := tsdbListCmd.Flag("human-readable", "Print human readable values.").Short('r').Bool()
@@ -236,7 +238,7 @@ func main() {
 		os.Exit(checkErr(benchmarkWrite(*benchWriteOutPath, *benchSamplesFile, *benchWriteNumMetrics, *benchWriteNumScrapes)))

 	case tsdbAnalyzeCmd.FullCommand():
-		os.Exit(checkErr(analyzeBlock(*analyzePath, *analyzeBlockID, *analyzeLimit)))
+		os.Exit(checkErr(analyzeBlock(*analyzePath, *analyzeBlockID, *analyzeLimit, *analyzeRunExtended)))

 	case tsdbListCmd.FullCommand():
 		os.Exit(checkErr(listBlocks(*listPath, *listHumanReadable)))
@@ -471,8 +473,8 @@ func checkRules(filename string) (int, []error) {
 		fmt.Printf("%d duplicate rule(s) found.\n", len(dRules))
 		for _, n := range dRules {
 			fmt.Printf("Metric: %s\nLabel(s):\n", n.metric)
-			for i, l := range n.label {
-				fmt.Printf("\t%s: %s\n", i, l)
+			for _, l := range n.label {
+				fmt.Printf("\t%s: %s\n", l.Name, l.Value)
 			}
 		}
 		fmt.Println("Might cause inconsistency while recording expressions.")
@@ -483,29 +485,52 @@ func checkRules(filename string) (int, []error) {

 type compareRuleType struct {
 	metric string
-	label  map[string]string
+	label  labels.Labels
 }

+type compareRuleTypes []compareRuleType
+
+func (c compareRuleTypes) Len() int           { return len(c) }
+func (c compareRuleTypes) Swap(i, j int)      { c[i], c[j] = c[j], c[i] }
+func (c compareRuleTypes) Less(i, j int) bool { return compare(c[i], c[j]) < 0 }
+
+func compare(a, b compareRuleType) int {
+	if res := strings.Compare(a.metric, b.metric); res != 0 {
+		return res
+	}
+
+	return labels.Compare(a.label, b.label)
+}
+
 func checkDuplicates(groups []rulefmt.RuleGroup) []compareRuleType {
 	var duplicates []compareRuleType
+	var rules compareRuleTypes

 	for _, group := range groups {
-		for index, rule := range group.Rules {
-			inst := compareRuleType{
+		for _, rule := range group.Rules {
+			rules = append(rules, compareRuleType{
 				metric: ruleMetric(rule),
-				label:  rule.Labels,
-			}
-			for i := 0; i < index; i++ {
-				t := compareRuleType{
-					metric: ruleMetric(group.Rules[i]),
-					label:  group.Rules[i].Labels,
-				}
-				if reflect.DeepEqual(t, inst) {
-					duplicates = append(duplicates, t)
-				}
-			}
+				label:  labels.FromMap(rule.Labels),
+			})
 		}
 	}
+	if len(rules) < 2 {
+		return duplicates
+	}
+	sort.Sort(rules)
+
+	last := rules[0]
+	for i := 1; i < len(rules); i++ {
+		if compare(last, rules[i]) == 0 {
+			// Don't add a duplicated rule multiple times.
+			if len(duplicates) == 0 || compare(last, duplicates[len(duplicates)-1]) != 0 {
+				duplicates = append(duplicates, rules[i])
+			}
+		}
+		last = rules[i]
+	}

 	return duplicates
 }
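For context, the rewrite above replaces the O(n²) pairwise `reflect.DeepEqual` scan with one sort followed by a comparison of adjacent entries. A minimal standalone sketch of that sort-and-scan pattern (hypothetical `rule` type, not the promtool code):

```go
package main

import (
	"fmt"
	"sort"
)

// rule is a stand-in for promtool's compareRuleType: a metric name
// plus a canonical label signature.
type rule struct {
	metric string
	labels string
}

func main() {
	rules := []rule{
		{"job:test:count_over_time1m", ""},
		{"fixed_data", ""},
		{"job:test:count_over_time1m", ""},
		{"job:test:count_over_time1m", ""},
	}

	// Sort once; duplicates become adjacent, so a single linear scan
	// finds them in O(n log n) overall instead of O(n^2) pairwise checks.
	sort.Slice(rules, func(i, j int) bool {
		if rules[i].metric != rules[j].metric {
			return rules[i].metric < rules[j].metric
		}
		return rules[i].labels < rules[j].labels
	})

	var dups []rule
	for i := 1; i < len(rules); i++ {
		if rules[i] == rules[i-1] {
			// Report each duplicated rule only once, as the diff does.
			if len(dups) == 0 || dups[len(dups)-1] != rules[i] {
				dups = append(dups, rules[i])
			}
		}
	}
	fmt.Println(dups) // one entry for job:test:count_over_time1m
}
```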
@@ -911,20 +936,17 @@ func importRules(url *url.URL, start, end, outputDir string, evalInterval time.D
 	} else {
 		etime, err = parseTime(end)
 		if err != nil {
-			fmt.Fprintln(os.Stderr, "error parsing end time:", err)
-			return err
+			return fmt.Errorf("error parsing end time: %v", err)
 		}
 	}

 	stime, err = parseTime(start)
 	if err != nil {
-		fmt.Fprintln(os.Stderr, "error parsing start time:", err)
-		return err
+		return fmt.Errorf("error parsing start time: %v", err)
 	}

 	if !stime.Before(etime) {
-		fmt.Fprintln(os.Stderr, "start time is not before end time")
-		return nil
+		return errors.New("start time is not before end time")
 	}

 	cfg := ruleImporterConfig{
@@ -937,25 +959,24 @@ func importRules(url *url.URL, start, end, outputDir string, evalInterval time.D
 		Address: url.String(),
 	})
 	if err != nil {
-		fmt.Fprintln(os.Stderr, "new api client error", err)
-		return err
+		return fmt.Errorf("new api client error: %v", err)
 	}

 	ruleImporter := newRuleImporter(log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr)), cfg, v1.NewAPI(client))
 	errs := ruleImporter.loadGroups(ctx, files)
 	for _, err := range errs {
 		if err != nil {
-			fmt.Fprintln(os.Stderr, "rule importer parse error", err)
-			return err
+			return fmt.Errorf("rule importer parse error: %v", err)
 		}
 	}

 	errs = ruleImporter.importAll(ctx)
 	for _, err := range errs {
-		if err != nil {
-			fmt.Fprintln(os.Stderr, "rule importer error", err)
-		}
+		fmt.Fprintln(os.Stderr, "rule importer error:", err)
+	}
+	if len(errs) > 0 {
+		return errors.New("error importing rules")
 	}

-	return err
+	return nil
 }
@@ -21,6 +21,8 @@ import (
 	"testing"
 	"time"

+	"github.com/prometheus/prometheus/pkg/labels"
+	"github.com/prometheus/prometheus/pkg/rulefmt"
 	"github.com/stretchr/testify/require"
 )
@@ -118,3 +120,46 @@ func TestCheckSDFile(t *testing.T) {
 		})
 	}
 }
+
+func TestCheckDuplicates(t *testing.T) {
+	cases := []struct {
+		name         string
+		ruleFile     string
+		expectedDups []compareRuleType
+	}{
+		{
+			name:     "no duplicates",
+			ruleFile: "./testdata/rules.yml",
+		},
+		{
+			name:     "duplicate in other group",
+			ruleFile: "./testdata/rules_duplicates.yml",
+			expectedDups: []compareRuleType{
+				{
+					metric: "job:test:count_over_time1m",
+					label:  labels.New(),
+				},
+			},
+		},
+	}
+
+	for _, test := range cases {
+		c := test
+		t.Run(c.name, func(t *testing.T) {
+			rgs, err := rulefmt.ParseFile(c.ruleFile)
+			require.Empty(t, err)
+			dups := checkDuplicates(rgs.Groups)
+			require.Equal(t, c.expectedDups, dups)
+		})
+	}
+}
+
+func BenchmarkCheckDuplicates(b *testing.B) {
+	rgs, err := rulefmt.ParseFile("./testdata/rules_large.yml")
+	require.Empty(b, err)
+	b.ResetTimer()
+
+	for i := 0; i < b.N; i++ {
+		checkDuplicates(rgs.Groups)
+	}
+}
@@ -57,7 +57,7 @@ type ruleImporterConfig struct {
 // newRuleImporter creates a new rule importer that can be used to parse and evaluate recording rule files and create new series
 // written to disk in blocks.
 func newRuleImporter(logger log.Logger, config ruleImporterConfig, apiClient queryRangeAPI) *ruleImporter {
-	level.Info(logger).Log("backfiller", "new rule importer from start", config.start.Format(time.RFC822), " to end", config.end.Format(time.RFC822))
+	level.Info(logger).Log("backfiller", "new rule importer", "start", config.start.Format(time.RFC822), "end", config.end.Format(time.RFC822))
 	return &ruleImporter{
 		logger: logger,
 		config: config,
24  cmd/promtool/testdata/rules_duplicates.yml  vendored  Normal file
@@ -0,0 +1,24 @@
+# This is a rules file with duplicate expressions
+
+groups:
+  - name: base
+    rules:
+      - record: job:test:count_over_time1m
+        expr: sum without(instance) (count_over_time(test[1m]))
+
+      # A recording rule that doesn't depend on input series.
+      - record: fixed_data
+        expr: 1
+
+      # Subquery with default resolution test.
+      - record: suquery_interval_test
+        expr: count_over_time(up[5m:])
+
+      # Duplicating
+      - record: job:test:count_over_time1m
+        expr: sum without(instance) (count_over_time(test[1m]))
+
+  - name: duplicate
+    rules:
+      - record: job:test:count_over_time1m
+        expr: sum without(instance) (count_over_time(test[1m]))
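This fixture feeds the duplicate check rewritten above; a hedged way to exercise it by hand (`promtool check rules` is the entry point that calls `checkRules`):

```bash
promtool check rules cmd/promtool/testdata/rules_duplicates.yml
# Expected: job:test:count_over_time1m reported as a duplicate rule.
```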
40011  cmd/promtool/testdata/rules_large.yml  vendored  Normal file
(File diff suppressed because it is too large.)
@@ -418,7 +418,7 @@ func openBlock(path, blockID string) (*tsdb.DBReadOnly, tsdb.BlockReader, error)
 	return db, block, nil
 }

-func analyzeBlock(path, blockID string, limit int) error {
+func analyzeBlock(path, blockID string, limit int, runExtended bool) error {
 	db, block, err := openBlock(path, blockID)
 	if err != nil {
 		return err
@@ -564,7 +564,11 @@ func analyzeBlock(path, blockID string, limit int) error {
 	fmt.Printf("\nHighest cardinality metric names:\n")
 	printInfo(postingInfos)

-	return analyzeCompaction(block, ir)
+	if runExtended {
+		return analyzeCompaction(block, ir)
+	}
+
+	return nil
 }

 func analyzeCompaction(block tsdb.BlockReader, indexr tsdb.IndexReader) (err error) {
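With this change the compaction analysis only runs on request; a hedged usage sketch (the data path is illustrative):

```bash
# Default analysis, now without the compaction pass:
promtool tsdb analyze /path/to/data

# Opt back in to the extended (compaction) analysis:
promtool tsdb analyze --extended /path/to/data
```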
@@ -29,6 +29,7 @@ import (
 	"github.com/pkg/errors"
 	"github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"
+	"github.com/prometheus/common/sigv4"
 	yaml "gopkg.in/yaml.v2"

 	"github.com/prometheus/prometheus/discovery"
@@ -666,7 +667,7 @@ type RemoteWriteConfig struct {
 	HTTPClientConfig config.HTTPClientConfig `yaml:",inline"`
 	QueueConfig      QueueConfig             `yaml:"queue_config,omitempty"`
 	MetadataConfig   MetadataConfig          `yaml:"metadata_config,omitempty"`
-	SigV4Config      *SigV4Config            `yaml:"sigv4,omitempty"`
+	SigV4Config      *sigv4.SigV4Config      `yaml:"sigv4,omitempty"`
 }

 // SetDirectory joins any relative file paths with dir.
@@ -758,17 +759,6 @@ type MetadataConfig struct {
 	MaxSamplesPerSend int `yaml:"max_samples_per_send,omitempty"`
 }

-// SigV4Config is the configuration for signing remote write requests with
-// AWS's SigV4 verification process. Empty values will be retrieved using the
-// AWS default credentials chain.
-type SigV4Config struct {
-	Region    string        `yaml:"region,omitempty"`
-	AccessKey string        `yaml:"access_key,omitempty"`
-	SecretKey config.Secret `yaml:"secret_key,omitempty"`
-	Profile   string        `yaml:"profile,omitempty"`
-	RoleARN   string        `yaml:"role_arn,omitempty"`
-}
-
 // RemoteReadConfig is the configuration for reading from remote storage.
 type RemoteReadConfig struct {
 	URL *config.URL `yaml:"url"`
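The YAML shape of the `sigv4` block is unchanged by moving the struct into `prometheus/common`; a hedged configuration sketch whose keys mirror the struct tags removed above (endpoint and values are illustrative):

```yaml
remote_write:
  - url: https://example-remote-write.us-east-1.amazonaws.com/api/v1/remote_write
    sigv4:
      region: us-east-1
      # Every key is optional; empty values fall back to the AWS default
      # credentials chain, as the removed doc comment states.
      access_key: AKIDEXAMPLE
      secret_key: <secret>
      role_arn: arn:aws:iam::123456789012:role/prometheus-remote-write
```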
@@ -11,6 +11,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

+//go:build !windows
 // +build !windows

 package config
@@ -1302,6 +1302,10 @@ var expectedErrors = []struct {
 		filename: "http_url_bad_scheme.bad.yml",
 		errMsg:   "URL scheme must be 'http' or 'https'",
 	},
+	{
+		filename: "empty_scrape_config_action.bad.yml",
+		errMsg:   "relabel action cannot be empty",
+	},
 }

 func TestBadConfigs(t *testing.T) {
4  config/testdata/empty_scrape_config_action.bad.yml  vendored  Normal file
@@ -0,0 +1,4 @@
+scrape_configs:
+  - job_name: prometheus
+    relabel_configs:
+      - action: null
|
@ -15,6 +15,7 @@ package kubernetes
|
|||
|
||||
import (
|
||||
"context"
|
||||
"strings"
|
||||
|
||||
"github.com/go-kit/log"
|
||||
"github.com/go-kit/log/level"
|
||||
|
@@ -193,18 +194,16 @@ func (i *Ingress) buildIngress(ingress ingressAdaptor) *targetgroup.Group {
 	}
 	tg.Labels = ingressLabels(ingress)

-	tlsHosts := make(map[string]struct{})
-	for _, host := range ingress.tlsHosts() {
-		tlsHosts[host] = struct{}{}
-	}
-
 	for _, rule := range ingress.rules() {
+		scheme := "http"
 		paths := pathsFromIngressPaths(rule.paths())

-		scheme := "http"
-		_, isTLS := tlsHosts[rule.host()]
-		if isTLS {
-			scheme = "https"
+	out:
+		for _, pattern := range ingress.tlsHosts() {
+			if matchesHostnamePattern(pattern, rule.host()) {
+				scheme = "https"
+				break out
+			}
 		}

 		for _, path := range paths {
@@ -219,3 +218,33 @@ func (i *Ingress) buildIngress(ingress ingressAdaptor) *targetgroup.Group {

 	return tg
 }
+
+// matchesHostnamePattern returns true if the host matches a wildcard DNS
+// pattern or pattern and host are equal.
+func matchesHostnamePattern(pattern, host string) bool {
+	if pattern == host {
+		return true
+	}
+
+	patternParts := strings.Split(pattern, ".")
+	hostParts := strings.Split(host, ".")
+
+	// If the first element of the pattern is not a wildcard, give up.
+	if len(patternParts) == 0 || patternParts[0] != "*" {
+		return false
+	}
+
+	// A wildcard match require the pattern to have the same length as the host
+	// path.
+	if len(patternParts) != len(hostParts) {
+		return false
+	}
+
+	for i := 1; i < len(patternParts); i++ {
+		if patternParts[i] != hostParts[i] {
+			return false
+		}
+	}
+
+	return true
+}
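For context, a runnable condensation of the matcher added above (same logic with the guard conditions merged), showing the wildcard semantics:

```go
package main

import (
	"fmt"
	"strings"
)

// Standalone copy of the matcher from the diff above, for experimentation.
func matchesHostnamePattern(pattern, host string) bool {
	if pattern == host {
		return true
	}
	patternParts := strings.Split(pattern, ".")
	hostParts := strings.Split(host, ".")
	// Only a leading "*" acts as a wildcard, and it stands for exactly one label.
	if len(patternParts) == 0 || patternParts[0] != "*" || len(patternParts) != len(hostParts) {
		return false
	}
	for i := 1; i < len(patternParts); i++ {
		if patternParts[i] != hostParts[i] {
			return false
		}
	}
	return true
}

func main() {
	fmt.Println(matchesHostnamePattern("example.com", "example.com"))        // true: exact match
	fmt.Println(matchesHostnamePattern("*.example.com", "test.example.com")) // true: wildcard covers "test"
	fmt.Println(matchesHostnamePattern("*.example.com", "example.com"))      // false: label counts differ
	fmt.Println(matchesHostnamePattern("*.example.com", "a.b.example.com"))  // false: "*" spans one label only
}
```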
@@ -32,6 +32,7 @@ const (
 	TLSNo TLSMode = iota
 	TLSYes
 	TLSMixed
+	TLSWildcard
 )

 func makeIngress(tls TLSMode) *v1.Ingress {
@@ -81,6 +82,8 @@ func makeIngress(tls TLSMode) *v1.Ingress {
 		ret.Spec.TLS = []v1.IngressTLS{{Hosts: []string{"example.com", "test.example.com"}}}
 	case TLSMixed:
 		ret.Spec.TLS = []v1.IngressTLS{{Hosts: []string{"example.com"}}}
+	case TLSWildcard:
+		ret.Spec.TLS = []v1.IngressTLS{{Hosts: []string{"*.example.com"}}}
 	}

 	return ret
@@ -133,6 +136,8 @@ func makeIngressV1beta1(tls TLSMode) *v1beta1.Ingress {
 		ret.Spec.TLS = []v1beta1.IngressTLS{{Hosts: []string{"example.com", "test.example.com"}}}
 	case TLSMixed:
 		ret.Spec.TLS = []v1beta1.IngressTLS{{Hosts: []string{"example.com"}}}
+	case TLSWildcard:
+		ret.Spec.TLS = []v1beta1.IngressTLS{{Hosts: []string{"*.example.com"}}}
 	}

 	return ret
@@ -152,6 +157,8 @@ func expectedTargetGroups(ns string, tls TLSMode) map[string]*targetgroup.Group
 		scheme2 = "https"
 	case TLSMixed:
 		scheme1 = "https"
+	case TLSWildcard:
+		scheme2 = "https"
 	}

 	key := fmt.Sprintf("ingress/%s/testingress", ns)
@@ -2172,6 +2172,9 @@ it was not set during relabeling. The `__scheme__` and `__metrics_path__` labels
 are set to the scheme and metrics path of the target respectively. The `__param_<name>`
 label is set to the value of the first passed URL parameter called `<name>`.

+The `__scrape_interval__` and `__scrape_timeout__` labels are set to the target's
+interval and timeout. This is **experimental** and could change in the future.
+
 Additional labels prefixed with `__meta_` may be available during the
 relabeling phase. They are set by the service discovery mechanism that provided
 the target and vary between mechanisms.
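Since these labels exist during relabeling like any other `__`-prefixed label, they can for instance be copied into a visible label; a hedged sketch (job name illustrative):

```yaml
scrape_configs:
  - job_name: node
    relabel_configs:
      # Expose the effective per-target scrape interval as a regular label.
      - source_labels: [__scrape_interval__]
        target_label: scrape_interval
```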
@@ -34,7 +34,7 @@ that PromQL does not look ahead of the evaluation time for samples.

 `--enable-feature=promql-negative-offset`

 In contrast to the positive offset modifier, the negative offset modifier lets
-one shift a vector selector into the future. An example in which one may want
+one shift a vector selector into the future. An example in which one may want
 to use a negative offset is reviewing past data and making temporal comparisons
 with more recent data.
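A hedged example of the syntax this flag unlocks (metric name illustrative): when evaluating at a point in the past, a negative offset pulls in data from after that evaluation time:

```promql
# Request rate one hour "ahead" of the (past) evaluation time.
rate(http_requests_total[5m] offset -1h)
```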
@@ -59,5 +59,15 @@ Exemplar storage is implemented as a fixed size circular buffer that stores exem

 `--enable-feature=memory-snapshot-on-shutdown`

 This takes the snapshot of the chunks that are in memory along with the series information when shutting down and stores
-it on disk. This will reduce the startup time since the memory state can be restored with this snapshot and m-mapped
+it on disk. This will reduce the startup time since the memory state can be restored with this snapshot and m-mapped
 chunks without the need of WAL replay.
+
+## Extra Scrape Metrics
+
+`--enable-feature=extra-scrape-metrics`
+
+When enabled, for each instance scrape, Prometheus stores a sample in the following additional time series:
+
+- `scrape_timeout_seconds`. The configured `scrape_timeout` for a target. This allows you to measure each target to find out how close they are to timing out with `scrape_duration_seconds / scrape_timeout_seconds`.
+- `scrape_sample_limit`. The configured `sample_limit` for a target. This allows you to measure each target
+to find out how close they are to reaching the limit with `scrape_samples_post_metric_relabeling / scrape_sample_limit`. Note that `scrape_sample_limit` can be zero if there is no limit configured, which means that the query above can return `+Inf` for targets with no limit (as we divide by zero). If you want to query only for targets that do have a sample limit use this query: `scrape_samples_post_metric_relabeling / (scrape_sample_limit > 0)`.
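The two ratios described in the bullets above, as ready-to-run queries:

```promql
# How close each target is to its scrape timeout (1.0 = at the limit).
scrape_duration_seconds / scrape_timeout_seconds

# How close each target is to its sample limit, restricted to targets
# that actually have a limit configured (avoids the division by zero).
scrape_samples_post_metric_relabeling / (scrape_sample_limit > 0)
```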
@@ -502,7 +502,9 @@ $ curl http://localhost:9090/api/v1/targets
         "lastError": "",
         "lastScrape": "2017-01-17T15:07:44.723715405+01:00",
         "lastScrapeDuration": 0.050688943,
-        "health": "up"
+        "health": "up",
+        "scrapeInterval": "1m",
+        "scrapeTimeout": "10s"
       }
     ],
     "droppedTargets": [
@@ -511,6 +513,8 @@ $ curl http://localhost:9090/api/v1/targets
         "__address__": "127.0.0.1:9100",
         "__metrics_path__": "/metrics",
         "__scheme__": "http",
+        "__scrape_interval__": "1m",
+        "__scrape_timeout__": "10s",
         "job": "node"
       },
     }
@@ -27,7 +27,7 @@ replayed when the Prometheus server restarts. Write-ahead log files are stored
 in the `wal` directory in 128MB segments. These files contain raw data that
 has not yet been compacted; thus they are significantly larger than regular block
 files. Prometheus will retain a minimum of three write-ahead log files.
-High-traffic servers may retain more than three WAL files in order to to keep at
+High-traffic servers may retain more than three WAL files in order to keep at
 least two hours of raw data.

 A Prometheus server's data directory looks something like this:
@@ -17,7 +17,6 @@ import (
 	"context"
 	"io/ioutil"
 	"os"
-	"reflect"
 	"testing"

 	"github.com/prometheus/common/model"
@@ -217,14 +216,7 @@ func TestGenerateTargetGroups(t *testing.T) {

 	for _, testCase := range testCases {
 		result := generateTargetGroups(testCase.targetGroup)

-		if !reflect.DeepEqual(result, testCase.expectedCustomSD) {
-			t.Errorf("%q failed\ngot: %#v\nexpected: %v",
-				testCase.title,
-				result,
-				testCase.expectedCustomSD)
-		}
-
+		require.Equal(t, testCase.expectedCustomSD, result)
 	}
 }
@@ -17,6 +17,7 @@ import (
 	"testing"

 	"github.com/prometheus/common/model"
+	"github.com/stretchr/testify/require"
 )

 var (
@@ -32,17 +33,13 @@ func TestEscape(t *testing.T) {
 	value := "abzABZ019(){},'\"\\"
 	expected := "abzABZ019\\(\\)\\{\\}\\,\\'\\\"\\\\"
 	actual := escape(model.LabelValue(value))
-	if expected != actual {
-		t.Errorf("Expected %s, got %s", expected, actual)
-	}
+	require.Equal(t, expected, actual)

 	// Test percent-encoding.
 	value = "é/|_;:%."
 	expected = "%C3%A9%2F|_;:%25%2E"
 	actual = escape(model.LabelValue(value))
-	if expected != actual {
-		t.Errorf("Expected %s, got %s", expected, actual)
-	}
+	require.Equal(t, expected, actual)
 }

 func TestPathFromMetric(t *testing.T) {
@@ -51,7 +48,5 @@ func TestPathFromMetric(t *testing.T) {
 		".many_chars.abc!ABC:012-3!45%C3%B667~89%2E%2F\\(\\)\\{\\}\\,%3D%2E\\\"\\\\" +
 		".testlabel.test:value")
 	actual := pathFromMetric(metric, "prefix.")
-	if expected != actual {
-		t.Errorf("Expected %s, got %s", expected, actual)
-	}
+	require.Equal(t, expected, actual)
 }
@@ -24,6 +24,7 @@ import (

 	influx "github.com/influxdata/influxdb/client/v2"
 	"github.com/prometheus/common/model"
+	"github.com/stretchr/testify/require"
 )

 func TestClient(t *testing.T) {
@@ -73,28 +74,17 @@ testmetric,test_label=test_label_value2 value=5.1234 123456789123

 	server := httptest.NewServer(http.HandlerFunc(
 		func(w http.ResponseWriter, r *http.Request) {
-			if r.Method != "POST" {
-				t.Fatalf("Unexpected method; expected POST, got %s", r.Method)
-			}
-			if r.URL.Path != "/write" {
-				t.Fatalf("Unexpected path; expected %s, got %s", "/write", r.URL.Path)
-			}
+			require.Equal(t, "POST", r.Method, "Unexpected method.")
+			require.Equal(t, "/write", r.URL.Path, "Unexpected path.")
 			b, err := ioutil.ReadAll(r.Body)
-			if err != nil {
-				t.Fatalf("Error reading body: %s", err)
-			}
-
-			if string(b) != expectedBody {
-				t.Fatalf("Unexpected request body; expected:\n\n%s\n\ngot:\n\n%s", expectedBody, string(b))
-			}
+			require.NoError(t, err, "Error reading body.")
+			require.Equal(t, expectedBody, string(b), "Unexpected request body.")
 		},
 	))
 	defer server.Close()

 	serverURL, err := url.Parse(server.URL)
-	if err != nil {
-		t.Fatalf("Unable to parse server URL %s: %s", server.URL, err)
-	}
+	require.NoError(t, err, "Unable to parse server URL.")

 	conf := influx.HTTPConfig{
 		Addr: serverURL.String(),
@@ -103,8 +93,6 @@ testmetric,test_label=test_label_value2 value=5.1234 123456789123
 		Timeout: time.Minute,
 	}
 	c := NewClient(nil, conf, "test_db", "default")

-	if err := c.Write(samples); err != nil {
-		t.Fatalf("Error sending samples: %s", err)
-	}
+	err = c.Write(samples)
+	require.NoError(t, err, "Error sending samples.")
 }
@@ -14,12 +14,11 @@
 package opentsdb

 import (
-	"bytes"
 	"encoding/json"
-	"reflect"
 	"testing"

 	"github.com/prometheus/common/model"
+	"github.com/stretchr/testify/require"
 )

 var (
@@ -36,9 +35,7 @@ func TestTagsFromMetric(t *testing.T) {
 		"many_chars": TagValue("abc!ABC:012-3!45ö67~89./"),
 	}
 	actual := tagsFromMetric(metric)
-	if !reflect.DeepEqual(actual, expected) {
-		t.Errorf("Expected %#v, got %#v", expected, actual)
-	}
+	require.Equal(t, expected, actual)
 }

 func TestMarshalStoreSamplesRequest(t *testing.T) {
@@ -51,25 +48,11 @@ func TestMarshalStoreSamplesRequest(t *testing.T) {
 	expectedJSON := []byte(`{"metric":"test_.metric","timestamp":4711,"value":3.1415,"tags":{"many_chars":"abc_21ABC_.012-3_2145_C3_B667_7E89./","testlabel":"test_.value"}}`)

 	resultingJSON, err := json.Marshal(request)
-	if err != nil {
-		t.Fatalf("Marshal(request) resulted in err: %s", err)
-	}
-	if !bytes.Equal(resultingJSON, expectedJSON) {
-		t.Errorf(
-			"Marshal(request) => %q, want %q",
-			resultingJSON, expectedJSON,
-		)
-	}
+	require.NoError(t, err, "Marshal(request) resulted in err.")
+	require.Equal(t, expectedJSON, resultingJSON)

 	var unmarshaledRequest StoreSamplesRequest
 	err = json.Unmarshal(expectedJSON, &unmarshaledRequest)
-	if err != nil {
-		t.Fatalf("Unmarshal(expectedJSON, &unmarshaledRequest) resulted in err: %s", err)
-	}
-	if !reflect.DeepEqual(unmarshaledRequest, request) {
-		t.Errorf(
-			"Unmarshal(expectedJSON, &unmarshaledRequest) => %#v, want %#v",
-			unmarshaledRequest, request,
-		)
-	}
+	require.NoError(t, err, "Unmarshal(expectedJSON, &unmarshaledRequest) resulted in err.")
+	require.Equal(t, request, unmarshaledRequest)
 }
@@ -14,9 +14,10 @@
 package opentsdb

 import (
-	"bytes"
 	"encoding/json"
 	"testing"

+	"github.com/stretchr/testify/require"
 )

 var stringtests = []struct {
@@ -32,17 +33,9 @@ var stringtests = []struct {

 func TestTagValueMarshaling(t *testing.T) {
 	for i, tt := range stringtests {
-		json, err := json.Marshal(tt.tv)
-		if err != nil {
-			t.Errorf("%d. Marshal(%q) returned err: %s", i, tt.tv, err)
-		} else {
-			if !bytes.Equal(json, tt.json) {
-				t.Errorf(
-					"%d. Marshal(%q) => %q, want %q",
-					i, tt.tv, json, tt.json,
-				)
-			}
-		}
+		got, err := json.Marshal(tt.tv)
+		require.NoError(t, err, "%d. Marshal(%q) returned error.", i, tt.tv)
+		require.Equal(t, tt.json, got, "%d. Marshal(%q) not equal.", i, tt.tv)
 	}
 }
@@ -50,15 +43,7 @@ func TestTagValueUnMarshaling(t *testing.T) {
 	for i, tt := range stringtests {
 		var tv TagValue
 		err := json.Unmarshal(tt.json, &tv)
-		if err != nil {
-			t.Errorf("%d. Unmarshal(%q, &str) returned err: %s", i, tt.json, err)
-		} else {
-			if tv != tt.tv {
-				t.Errorf(
-					"%d. Unmarshal(%q, &str) => str==%q, want %q",
-					i, tt.json, tv, tt.tv,
-				)
-			}
-		}
+		require.NoError(t, err, "%d. Unmarshal(%q, &str) returned error.", i, tt.json)
+		require.Equal(t, tt.tv, tv, "%d. Unmarshal(%q, &str) not equal.", i, tt.json)
 	}
 }
@@ -9,9 +9,9 @@ and dashboards for Prometheus.
 To use them, you need to have `jsonnet` (v0.13+) and `jb` installed. If you
 have a working Go development environment, it's easiest to run the following:
 ```bash
-$ go get github.com/google/go-jsonnet/cmd/jsonnet
-$ go get github.com/google/go-jsonnet/cmd/jsonnetfmt
-$ go get github.com/jsonnet-bundler/jsonnet-bundler/cmd/jb
+$ go install github.com/google/go-jsonnet/cmd/jsonnet@latest
+$ go install github.com/google/go-jsonnet/cmd/jsonnetfmt@latest
+$ go install github.com/jsonnet-bundler/jsonnet-bundler/cmd/jb@latest
 ```

 _Note: The make targets `lint` and `fmt` need the `jsonnetfmt` binary, which is
@@ -12,7 +12,8 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-//+build tools
+//go:build tools
+// +build tools

 // Package tools tracks dependencies for tools that used in the build process.
 // See https://github.com/golang/go/issues/25922
47  go.mod
@@ -3,37 +3,37 @@ module github.com/prometheus/prometheus
 go 1.14

 require (
-	github.com/Azure/azure-sdk-for-go v55.8.0+incompatible
-	github.com/Azure/go-autorest/autorest v0.11.19
-	github.com/Azure/go-autorest/autorest/adal v0.9.14
+	github.com/Azure/azure-sdk-for-go v57.1.0+incompatible
+	github.com/Azure/go-autorest/autorest v0.11.20
+	github.com/Azure/go-autorest/autorest/adal v0.9.15
 	github.com/Azure/go-autorest/autorest/to v0.4.0 // indirect
 	github.com/Azure/go-autorest/autorest/validation v0.3.1 // indirect
 	github.com/alecthomas/units v0.0.0-20210208195552-ff826a37aa15
-	github.com/aws/aws-sdk-go v1.40.10
-	github.com/cespare/xxhash/v2 v2.1.1
+	github.com/aws/aws-sdk-go v1.40.37
+	github.com/cespare/xxhash/v2 v2.1.2
 	github.com/containerd/containerd v1.5.4 // indirect
 	github.com/dennwc/varint v1.0.0
 	github.com/dgryski/go-sip13 v0.0.0-20200911182023-62edffca9245
-	github.com/digitalocean/godo v1.64.2
-	github.com/docker/docker v20.10.7+incompatible
+	github.com/digitalocean/godo v1.65.0
+	github.com/docker/docker v20.10.8+incompatible
 	github.com/docker/go-connections v0.4.0 // indirect
 	github.com/edsrzf/mmap-go v1.0.0
 	github.com/envoyproxy/go-control-plane v0.9.9
 	github.com/envoyproxy/protoc-gen-validate v0.6.1
 	github.com/go-kit/log v0.1.0
-	github.com/go-logfmt/logfmt v0.5.0
-	github.com/go-openapi/strfmt v0.20.1
+	github.com/go-logfmt/logfmt v0.5.1
+	github.com/go-openapi/strfmt v0.20.2
 	github.com/go-zookeeper/zk v1.0.2
 	github.com/gogo/protobuf v1.3.2
 	github.com/golang/snappy v0.0.4
-	github.com/google/pprof v0.0.0-20210726183535-c50bf4fe5303
-	github.com/gophercloud/gophercloud v0.19.0
+	github.com/google/pprof v0.0.0-20210827144239-02619b876842
+	github.com/gophercloud/gophercloud v0.20.0
 	github.com/grpc-ecosystem/grpc-gateway v1.16.0
-	github.com/hashicorp/consul/api v1.9.1
-	github.com/hetznercloud/hcloud-go v1.28.0
+	github.com/hashicorp/consul/api v1.10.1
+	github.com/hetznercloud/hcloud-go v1.32.0
 	github.com/influxdata/influxdb v1.9.3
 	github.com/json-iterator/go v1.1.11
-	github.com/linode/linodego v0.31.0
+	github.com/linode/linodego v0.32.0
 	github.com/miekg/dns v1.1.43
 	github.com/moby/term v0.0.0-20201216013528-df9cb8a40635 // indirect
 	github.com/morikuni/aec v1.0.0 // indirect
@@ -43,10 +43,11 @@ require (
 	github.com/opentracing-contrib/go-stdlib v1.0.0
 	github.com/opentracing/opentracing-go v1.2.0
 	github.com/pkg/errors v0.9.1
-	github.com/prometheus/alertmanager v0.22.2
+	github.com/prometheus/alertmanager v0.23.0
 	github.com/prometheus/client_golang v1.11.0
 	github.com/prometheus/client_model v0.2.0
 	github.com/prometheus/common v0.30.0
+	github.com/prometheus/common/sigv4 v0.1.0
 	github.com/prometheus/exporter-toolkit v0.6.1
 	github.com/scaleway/scaleway-sdk-go v1.0.0-beta.7.0.20210223165440-c65ae3540d44
 	github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749
@@ -56,22 +57,22 @@ require (
 	github.com/uber/jaeger-lib v2.4.1+incompatible
 	go.uber.org/atomic v1.9.0
 	go.uber.org/goleak v1.1.10
-	golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985
-	golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914
+	golang.org/x/net v0.0.0-20210903162142-ad29c8ab022f
+	golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f
 	golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
-	golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c
+	golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34
 	golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac
 	golang.org/x/tools v0.1.5
-	google.golang.org/api v0.51.0
-	google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea
+	google.golang.org/api v0.56.0
+	google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83
 	google.golang.org/protobuf v1.27.1
 	gopkg.in/alecthomas/kingpin.v2 v2.2.6
 	gopkg.in/fsnotify/fsnotify.v1 v1.4.7
 	gopkg.in/yaml.v2 v2.4.0
 	gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b
-	k8s.io/api v0.21.3
-	k8s.io/apimachinery v0.21.3
-	k8s.io/client-go v0.21.3
+	k8s.io/api v0.22.1
+	k8s.io/apimachinery v0.22.1
+	k8s.io/client-go v0.22.1
 	k8s.io/klog v1.0.0
 	k8s.io/klog/v2 v2.10.0
 )
176  go.sum
@@ -22,8 +22,10 @@ cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb
 cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0=
 cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY=
 cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM=
-cloud.google.com/go v0.87.0 h1:8ZtzmY4a2JIO2sljMbpqkDYxA8aJQveYr3AMa+X40oc=
-cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY=
+cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ=
+cloud.google.com/go v0.93.3 h1:wPBktZFzYBcCZVARvwVKqH1uEj+aLXofJEtrb4oOsio=
+cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI=
 cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
 cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
 cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
@@ -47,8 +49,8 @@ collectd.org v0.3.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE=
 dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
 github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
 github.com/Azure/azure-sdk-for-go v41.3.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
-github.com/Azure/azure-sdk-for-go v55.8.0+incompatible h1:EuccMPzxu67cIE95/mrtwQivLv7ETmURi5IUgLNVug8=
-github.com/Azure/azure-sdk-for-go v55.8.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
+github.com/Azure/azure-sdk-for-go v57.1.0+incompatible h1:TKQ3ieyB0vVKkF6t9dsWbMjq56O1xU3eh3Ec09v6ajM=
+github.com/Azure/azure-sdk-for-go v57.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
 github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8=
 github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
 github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
@@ -59,9 +61,9 @@ github.com/Azure/go-autorest/autorest v0.9.3/go.mod h1:GsRuLYvwzLjjjRoWEIyMUaYq8
 github.com/Azure/go-autorest/autorest v0.10.0/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630=
 github.com/Azure/go-autorest/autorest v0.10.1/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630=
 github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw=
-github.com/Azure/go-autorest/autorest v0.11.12/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw=
-github.com/Azure/go-autorest/autorest v0.11.19 h1:7/IqD2fEYVha1EPeaiytVKhzmPV223pfkRIQUGOK2IE=
-github.com/Azure/go-autorest/autorest v0.11.19/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA=
+github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA=
+github.com/Azure/go-autorest/autorest v0.11.20 h1:s8H1PbCZSqg/DH7JMlOz6YMig6htWLNPsjDdlLqCx3M=
+github.com/Azure/go-autorest/autorest v0.11.20/go.mod h1:o3tqFY+QR40VOlk+pV4d77mORO64jOXSgEnPQgLK6JY=
 github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
 github.com/Azure/go-autorest/autorest/adal v0.8.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc=
 github.com/Azure/go-autorest/autorest/adal v0.8.1/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q=
@@ -70,8 +72,8 @@ github.com/Azure/go-autorest/autorest/adal v0.8.3/go.mod h1:ZjhuQClTqx435SRJ2iMl
 github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg=
 github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A=
 github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M=
-github.com/Azure/go-autorest/autorest/adal v0.9.14 h1:G8hexQdV5D4khOXrWG2YuLCFKhWYmWD8bHYaXN5ophk=
-github.com/Azure/go-autorest/autorest/adal v0.9.14/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M=
+github.com/Azure/go-autorest/autorest/adal v0.9.15 h1:X+p2GF0GWyOiSmqohIaEeuNFNDY4I4EOlVuUQvFdWMk=
+github.com/Azure/go-autorest/autorest/adal v0.9.15/go.mod h1:tGMin8I49Yij6AQ+rvV+Xa/zwxYQB5hmsd6DkfAx2+A=
 github.com/Azure/go-autorest/autorest/azure/auth v0.4.2/go.mod h1:90gmfKdlmKgfjUpnCEpOJzsUEjrWDSLwHIG73tSXddM=
 github.com/Azure/go-autorest/autorest/azure/cli v0.3.1/go.mod h1:ZG5p860J94/0kI9mNJVoIoLgXcirM2gF5i2kWloofxw=
 github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
@@ -180,8 +182,10 @@ github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN
 github.com/aws/aws-sdk-go v1.29.16/go.mod h1:1KvfttTE3SPKMpo8g2c6jL3ZKfXtFvKscTgahTma5Xg=
 github.com/aws/aws-sdk-go v1.30.12/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0=
 github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48=
-github.com/aws/aws-sdk-go v1.40.10 h1:h+xUINuuH/9CwxE7O8mAuW7Aj9E5agfE9jQ3DrJsnA8=
-github.com/aws/aws-sdk-go v1.40.10/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q=
+github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
+github.com/aws/aws-sdk-go v1.40.11/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q=
+github.com/aws/aws-sdk-go v1.40.37 h1:I+Q6cLctkFyMMrKukcDnj+i2kjrQ37LGiOM6xmsxC48=
+github.com/aws/aws-sdk-go v1.40.37/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q=
 github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
 github.com/benbjohnson/immutable v0.2.1/go.mod h1:uc6OHo6PN2++n98KHLxW8ef4W42ylHiQSENghE1ezxI=
 github.com/benbjohnson/tmpl v1.0.0/go.mod h1:igT620JFIi44B6awvU9IsDhR77IXWtFigTLil/RPdps=
@@ -208,13 +212,14 @@ github.com/cactus/go-statsd-client/statsd v0.0.0-20191106001114-12b4e2b38748/go.
 github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ=
 github.com/cenkalti/backoff v0.0.0-20181003080854-62661b46c409/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
 github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
-github.com/cenkalti/backoff/v4 v4.1.0/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
+github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
 github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
 github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
 github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
 github.com/cespare/xxhash/v2 v2.1.0/go.mod h1:dgIUBU3pDso/gPgZ1osOZ0iQf77oPR28Tjxl5dIMyVM=
-github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
 github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
+github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
 github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw=
 github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
 github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
@@ -360,8 +365,8 @@ github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8
 github.com/dgryski/go-sip13 v0.0.0-20190329191031-25c5027a8c7b/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
 github.com/dgryski/go-sip13 v0.0.0-20200911182023-62edffca9245 h1:9cOfvEwjQxdwKuNDTQSaMKNRvwKwgZG+U4HrjeRKHso=
 github.com/dgryski/go-sip13 v0.0.0-20200911182023-62edffca9245/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
-github.com/digitalocean/godo v1.64.2 h1:lJEB2TVIkJydFWJMPtdYOPa2Xwib+smZqq/oUZF8/iA=
-github.com/digitalocean/godo v1.64.2/go.mod h1:p7dOjjtSBqCTUksqtA5Fd3uaKs9kyTq2xcz76ulEJRU=
+github.com/digitalocean/godo v1.65.0 h1:3SywGJBC18HaYtPQF+T36jYzXBi+a6eIMonSjDll7TA=
+github.com/digitalocean/godo v1.65.0/go.mod h1:p7dOjjtSBqCTUksqtA5Fd3uaKs9kyTq2xcz76ulEJRU=
 github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8=
 github.com/dnaeon/go-vcr v1.0.1 h1:r8L/HqC0Hje5AXMu1ooW8oyQyOFv4GxqpL0nRP7SLLY=
 github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E=
@@ -369,8 +374,8 @@ github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TT
 github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
 github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug=
 github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
-github.com/docker/docker v20.10.7+incompatible h1:Z6O9Nhsjv+ayUEeI1IojKbYcsGdgYSNqxe1s2MYzUhQ=
-github.com/docker/docker v20.10.7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v20.10.8+incompatible h1:RVqD337BgQicVCzYrrlhLDWhq6OAD2PJDUg2LsEUvKM=
+github.com/docker/docker v20.10.8+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
 github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
 github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
 github.com/docker/go-events v0.0.0-20170721190031-9461782956ad/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
@@ -409,14 +414,15 @@ github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7
 github.com/envoyproxy/protoc-gen-validate v0.6.1 h1:4CF52PCseTFt4bE+Yk3dIpdVi7XWuPVMhPtm4FaIJPM=
 github.com/envoyproxy/protoc-gen-validate v0.6.1/go.mod h1:txg5va2Qkip90uYoSKH+nkAAmXrb2j3iq4FLwdrCbXQ=
 github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
-github.com/evanphx/json-patch v4.9.0+incompatible h1:kLcOMZeuLAJvL2BPWLMIj5oaZQobrkAqrL+WFZwQses=
-github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
+github.com/evanphx/json-patch v4.11.0+incompatible h1:glyUF9yIYtMHzn8xaKw5rMhdWcwsYV8dZHIq5567/xs=
+github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
 github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
 github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s=
 github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
 github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
 github.com/form3tech-oss/jwt-go v3.2.2+incompatible h1:TcekIExNqud5crz4xD2pavyTgWiPvpYe4Xau31I0PRk=
 github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
+github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
 github.com/foxcpp/go-mockdns v0.0.0-20201212160233-ede2f9158d15/go.mod h1:tPg4cp4nseejPd+UKxtCVQ2hUxNTZ7qQZJa7CLriIeo=
 github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4=
 github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20=
@@ -430,6 +436,8 @@ github.com/getkin/kin-openapi v0.53.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW
 github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
 github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
 github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
+github.com/gin-gonic/gin v1.5.0/go.mod h1:Nd6IXA8m5kNZdNEHMBd93KT+mdY3+bewLgRvmCsR2Do=
 github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q=
 github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q=
 github.com/glycerine/go-unsnap-stream v0.0.0-20180323001048-9f0cb55181dd/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE=
@@ -448,8 +456,9 @@ github.com/go-kit/log v0.1.0 h1:DGJh0Sm43HbOeYDNnVZFl8BvcYVvjD5bqYJvp0REbwQ=
 github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
 github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
 github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
-github.com/go-logfmt/logfmt v0.5.0 h1:TrB8swr/68K7m9CcGut2g3UOihhbcbiMAYiuTXdEih4=
 github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
+github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA=
+github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
 github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
 github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI=
 github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik=
@@ -504,8 +513,8 @@ github.com/go-openapi/runtime v0.19.4/go.mod h1:X277bwSUBxVlCYR3r7xgZZGKVvBd/29g
 github.com/go-openapi/runtime v0.19.15/go.mod h1:dhGWCTKRXlAfGnQG0ONViOZpjfg0m2gUt9nTQPQZuoo=
 github.com/go-openapi/runtime v0.19.16/go.mod h1:5P9104EJgYcizotuXhEuUrzVc+j1RiSjahULvYmlv98=
 github.com/go-openapi/runtime v0.19.24/go.mod h1:Lm9YGCeecBnUUkFTxPC4s1+lwrkJ0pthx8YvyjCfkgk=
-github.com/go-openapi/runtime v0.19.28 h1:9lYu6axek8LJrVkMVViVirRcpoaCxXX7+sSvmizGVnA=
-github.com/go-openapi/runtime v0.19.28/go.mod h1:BvrQtn6iVb2QmiVXRsFAm6ZCAZBpbVKFfN6QWCp582M=
+github.com/go-openapi/runtime v0.19.29 h1:5IIvCaIDbxetN674vX9eOxvoZ9mYGQ16fV1Q0VSG+NA=
+github.com/go-openapi/runtime v0.19.29/go.mod h1:BvrQtn6iVb2QmiVXRsFAm6ZCAZBpbVKFfN6QWCp582M=
 github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc=
 github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI=
 github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI=
@@ -528,8 +537,9 @@ github.com/go-openapi/strfmt v0.19.4/go.mod h1:eftuHTlB/dI8Uq8JJOyRlieZf+WkkxUuk
 github.com/go-openapi/strfmt v0.19.5/go.mod h1:eftuHTlB/dI8Uq8JJOyRlieZf+WkkxUuk0dgdHXr2Qk=
 github.com/go-openapi/strfmt v0.19.11/go.mod h1:UukAYgTaQfqJuAFlNxxMWNvMYiwiXtLsF2VwmoFtbtc=
 github.com/go-openapi/strfmt v0.20.0/go.mod h1:UukAYgTaQfqJuAFlNxxMWNvMYiwiXtLsF2VwmoFtbtc=
|
||||
github.com/go-openapi/strfmt v0.20.1 h1:1VgxvehFne1mbChGeCmZ5pc0LxUf6yaACVSIYAR91Xc=
|
||||
github.com/go-openapi/strfmt v0.20.1/go.mod h1:43urheQI9dNtE5lTZQfuFJvjYJKPrxicATpEfZwHUNk=
|
||||
github.com/go-openapi/strfmt v0.20.2 h1:6XZL+fF4VZYFxKQGLAUB358hOrRh/wS51uWEtlONADE=
|
||||
github.com/go-openapi/strfmt v0.20.2/go.mod h1:43urheQI9dNtE5lTZQfuFJvjYJKPrxicATpEfZwHUNk=
|
||||
github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I=
|
||||
github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg=
|
||||
github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg=
|
||||
|
@ -552,6 +562,8 @@ github.com/go-openapi/validate v0.19.15/go.mod h1:tbn/fdOwYHgrhPBzidZfJC2MIVvs9G
|
|||
github.com/go-openapi/validate v0.20.1/go.mod h1:b60iJT+xNNLfaQJUqLI7946tYiFEOuE9E4k54HpKcJ0=
|
||||
github.com/go-openapi/validate v0.20.2 h1:AhqDegYV3J3iQkMPJSXkvzymHKMTw0BST3RK3hTT4ts=
|
||||
github.com/go-openapi/validate v0.20.2/go.mod h1:e7OJoKNgd0twXZwIn0A43tHbvIcr/rZIVCbJBpTUoY0=
|
||||
github.com/go-playground/locales v0.12.1/go.mod h1:IUMDtCfWo/w/mtMfIE/IG2K+Ey3ygWanZIBtBW0W2TM=
|
||||
github.com/go-playground/universal-translator v0.16.0/go.mod h1:1AnU7NaIRDWWzGEKwgtJRd2xk99HeFyHw3yid4rvQIY=
|
||||
github.com/go-resty/resty/v2 v2.1.1-0.20191201195748-d7b97669fe48 h1:JVrqSeQfdhYRFk24TvhTZWU0q8lfCojxZQFi3Ou7+uY=
|
||||
github.com/go-resty/resty/v2 v2.1.1-0.20191201195748-d7b97669fe48/go.mod h1:dZGr0i9PLlaaTD4H/hoZIDjQ+r6xq8mgbRzHZf7f2J8=
|
||||
github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
|
||||
|
@ -603,6 +615,8 @@ github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXP
|
|||
github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
|
||||
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
|
||||
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
|
||||
github.com/golang-jwt/jwt/v4 v4.0.0 h1:RAqyYixv1p7uEnocuy8P1nru5wprCh/MH2BIlW5z5/o=
|
||||
github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
|
||||
github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0=
|
||||
github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k=
|
||||
github.com/golang/geo v0.0.0-20190916061304-5b978397cfec/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI=
|
||||
|
@ -612,8 +626,9 @@ github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4er
|
|||
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY=
|
||||
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
|
||||
|
@ -649,8 +664,9 @@ github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
|
|||
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/golangci/lint-1 v0.0.0-20181222135242-d2cdd8c08219/go.mod h1:/X8TswGSh1pIozq4ZwCfxS0WA5JGXguxk94ar/4c87Y=
|
||||
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo=
|
||||
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4=
|
||||
github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
|
||||
github.com/google/flatbuffers v1.11.0/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
|
||||
github.com/google/flatbuffers v2.0.0+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
|
||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||
|
@ -689,8 +705,9 @@ github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLe
|
|||
github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/pprof v0.0.0-20210726183535-c50bf4fe5303 h1:UP+GUfRSErTzM7UtTMt4TjnfXKK3ybrGQasgSOp7vLY=
|
||||
github.com/google/pprof v0.0.0-20210726183535-c50bf4fe5303/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/pprof v0.0.0-20210827144239-02619b876842 h1:JCrt5MIE1fHQtdy1825HwJ45oVQaqHE6lgssRhjcg/o=
|
||||
github.com/google/pprof v0.0.0-20210827144239-02619b876842/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
|
||||
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
|
@ -698,16 +715,19 @@ github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+
|
|||
github.com/google/uuid v1.2.0 h1:qJYtXnJRWmpe7m/3XlyhrsLrEURqHRM2kxzoxXqyUDs=
|
||||
github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
|
||||
github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM=
|
||||
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
|
||||
github.com/googleapis/gax-go/v2 v2.1.0 h1:6DWmvNpomjL1+3liNSZbVns3zsYzzCjm6pRBO1tLeso=
|
||||
github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0=
|
||||
github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
|
||||
github.com/googleapis/gnostic v0.4.0/go.mod h1:on+2t9HRStVgn95RSsFWFz+6Q0Snyqv1awfrALZdbtU=
|
||||
github.com/googleapis/gnostic v0.4.1 h1:DLJCy1n/vrD4HPjOvYcT8aYQXpPIzoRZONaYwyycI+I=
|
||||
github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg=
|
||||
github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU=
|
||||
github.com/googleapis/gnostic v0.5.5 h1:9fHAtK0uDfpveeqqo1hkEZJcFvYXAiCN3UutL8F9xHw=
|
||||
github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA=
|
||||
github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8=
|
||||
github.com/gophercloud/gophercloud v0.10.0/go.mod h1:gmC5oQqMDOMO1t1gq5DquX/yAU808e/4mzjjDA76+Ss=
|
||||
github.com/gophercloud/gophercloud v0.19.0 h1:zzaIh8W2K5M4AkJhPV1z6O4Sp0FOObzXm61NUmFz3Kw=
|
||||
github.com/gophercloud/gophercloud v0.19.0/go.mod h1:wRtmUelyIIv3CSSDI47aUwbs075O6i+LY+pXsKCBsb4=
|
||||
github.com/gophercloud/gophercloud v0.20.0 h1:1+4jrsjVhdX5omlAo4jkmFc6ftLbuXLzgFo4i6lH+Gk=
|
||||
github.com/gophercloud/gophercloud v0.20.0/go.mod h1:wRtmUelyIIv3CSSDI47aUwbs075O6i+LY+pXsKCBsb4=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||
github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
|
||||
github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ=
|
||||
|
@ -730,8 +750,8 @@ github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4
|
|||
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
|
||||
github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE=
|
||||
github.com/hashicorp/consul/api v1.4.0/go.mod h1:xc8u05kyMa3Wjr9eEAsIAo3dg8+LywT5E/Cl7cNS5nU=
|
||||
github.com/hashicorp/consul/api v1.9.1 h1:SngrdG2L62qqLsUz85qcPhFZ78rPf8tcD5qjMgs6MME=
|
||||
github.com/hashicorp/consul/api v1.9.1/go.mod h1:XjsvQN+RJGWI2TWy1/kqaE16HrR2J/FWgkYjdZQsX9M=
|
||||
github.com/hashicorp/consul/api v1.10.1 h1:MwZJp86nlnL+6+W1Zly4JUuVn9YHhMggBirMpHGD7kw=
|
||||
github.com/hashicorp/consul/api v1.10.1/go.mod h1:XjsvQN+RJGWI2TWy1/kqaE16HrR2J/FWgkYjdZQsX9M=
|
||||
github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
|
||||
github.com/hashicorp/consul/sdk v0.4.0/go.mod h1:fY08Y9z5SvJqevyZNy6WWPXiG3KwBPAvlcdx16zZ0fM=
|
||||
github.com/hashicorp/consul/sdk v0.8.0 h1:OJtKBtEjboEZvG6AOUdh4Z1Zbyu0WcxQ0qatRrZHTVU=
|
||||
|
@ -779,14 +799,14 @@ github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2p
|
|||
github.com/hashicorp/memberlist v0.1.4/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
|
||||
github.com/hashicorp/memberlist v0.2.0/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE=
|
||||
github.com/hashicorp/memberlist v0.2.2/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE=
|
||||
github.com/hashicorp/memberlist v0.2.3 h1:BwZa5IjREr75J0am7nblP+X5i95Rmp8EEbMI5vkUWdA=
|
||||
github.com/hashicorp/memberlist v0.2.3/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE=
|
||||
github.com/hashicorp/memberlist v0.2.4 h1:OOhYzSvFnkFQXm1ysE8RjXTHsqSRDyP4emusC9K7DYg=
|
||||
github.com/hashicorp/memberlist v0.2.4/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE=
|
||||
github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
|
||||
github.com/hashicorp/serf v0.9.0/go.mod h1:YL0HO+FifKOW2u1ke99DGVu1zhcpZzNwrLIqBC7vbYU=
|
||||
github.com/hashicorp/serf v0.9.5 h1:EBWvyu9tcRszt3Bxp3KNssBMP1KuHWyO51lz9+786iM=
|
||||
github.com/hashicorp/serf v0.9.5/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk=
|
||||
github.com/hetznercloud/hcloud-go v1.28.0 h1:T2a0CVGETf7BoWIdZ/TACqmTZAa/ROutcfdUHYiPAQ4=
|
||||
github.com/hetznercloud/hcloud-go v1.28.0/go.mod h1:2C5uMtBiMoFr3m7lBFPf7wXTdh33CevmZpQIIDPGYJI=
|
||||
github.com/hetznercloud/hcloud-go v1.32.0 h1:7zyN2V7hMlhm3HZdxOarmOtvzKvkcYKjM0hcwYMQZz0=
|
||||
github.com/hetznercloud/hcloud-go v1.32.0/go.mod h1:XX/TQub3ge0yWR2yHWmnDVIrB+MQbda1pHxkUmDlUME=
|
||||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
||||
github.com/huandu/xstrings v1.0.0/go.mod h1:4qWG/gcEcfX4z/mBDHJ++3ReCw9ibxbsNJbcucJdbSo=
|
||||
github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg=
|
||||
|
@ -882,11 +902,12 @@ github.com/kylelemons/godebug v0.0.0-20160406211939-eadb3ce320cb/go.mod h1:B69LE
|
|||
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
|
||||
github.com/labstack/echo/v4 v4.2.1/go.mod h1:AA49e0DZ8kk5jTOOCKNuPR6oTnBS0dYiM4FW1e6jwpg=
|
||||
github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k=
|
||||
github.com/leodido/go-urn v1.1.0/go.mod h1:+cyI34gQWZcE1eQU7NVgKkkzdXDQHr1dBMtdAPozLkw=
|
||||
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
|
||||
github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM=
|
||||
github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4=
|
||||
github.com/linode/linodego v0.31.0 h1:99+t6GtTaIsL+ncz5BpCbkh8Y90rC7qW1Not8MFe0Gw=
|
||||
github.com/linode/linodego v0.31.0/go.mod h1:BR0gVkCJffEdIGJSl6bHR80Ty+Uvg/2jkjmrWaFectM=
|
||||
github.com/linode/linodego v0.32.0 h1:IK04cx2b/IwAAd6XLruf1Dl/n3dRXj87Uw/5qo6afVU=
|
||||
github.com/linode/linodego v0.32.0/go.mod h1:BR0gVkCJffEdIGJSl6bHR80Ty+Uvg/2jkjmrWaFectM=
|
||||
github.com/lyft/protoc-gen-star v0.5.1/go.mod h1:9toiA3cC7z5uVbODF7kEQ91Xn7XNFkVUl+SrEe+ZORU=
|
||||
github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ=
|
||||
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
|
||||
|
@ -1005,13 +1026,15 @@ github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W
|
|||
github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.12.1 h1:mFwc4LvZ0xpSvDZ3E+k8Yte0hLOMxXUlP+yXtJqkYfQ=
|
||||
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
|
||||
github.com/onsi/ginkgo v1.14.0 h1:2mOpI4JVVPBN+WQRa0WKH2eXR+Ey+uK4n7Zj0aYpIQA=
|
||||
github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
|
||||
github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
|
||||
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
|
||||
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
||||
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
||||
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
|
||||
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
|
||||
github.com/onsi/gomega v1.10.3 h1:gph6h/qe9GSUw1NhH1gp+qb+h8rXD8Cy60Z32Qw3ELA=
|
||||
github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc=
|
||||
github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk=
|
||||
|
@ -1082,8 +1105,8 @@ github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndr
|
|||
github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s=
|
||||
github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA=
|
||||
github.com/prometheus/alertmanager v0.20.0/go.mod h1:9g2i48FAyZW6BtbsnvHtMHQXl2aVtrORKwKVCQ+nbrg=
|
||||
github.com/prometheus/alertmanager v0.22.2 h1:JrDZalSEMb2/2bqGAhls6ZnvOxbC5jMIu29JV+uWTC0=
|
||||
github.com/prometheus/alertmanager v0.22.2/go.mod h1:rYinOWxFuCnNssc3iOjn2oMTlhLaPcUuqV5yk5JKUAE=
|
||||
github.com/prometheus/alertmanager v0.23.0 h1:KIb9IChC3kg+1CC388qfr7bsT+tARpQqdsCMoatdObA=
|
||||
github.com/prometheus/alertmanager v0.23.0/go.mod h1:0MLTrjQI8EuVmvykEhcfr/7X0xmaDAZrqMgxIq3OXHk=
|
||||
github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
||||
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
||||
github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs=
|
||||
|
@ -1095,7 +1118,6 @@ github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeD
|
|||
github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
|
||||
github.com/prometheus/client_golang v1.5.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
|
||||
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
|
||||
github.com/prometheus/client_golang v1.10.0/go.mod h1:WJM3cc3yu7XKBKa/I8WeZm+V3eltZnBwfENSU7mdogU=
|
||||
github.com/prometheus/client_golang v1.11.0 h1:HNkLOAEQMIDv/K+04rukrLx6ch7msSRwf3/SASFAGtQ=
|
||||
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
|
||||
github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
|
@ -1115,14 +1137,12 @@ github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+
|
|||
github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA=
|
||||
github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
|
||||
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
|
||||
github.com/prometheus/common v0.15.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s=
|
||||
github.com/prometheus/common v0.18.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s=
|
||||
github.com/prometheus/common v0.23.0/go.mod h1:H6QK/N6XVT42whUeIdI3dp36w49c+/iMDk7UAI2qm7Q=
|
||||
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
|
||||
github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
|
||||
github.com/prometheus/common v0.30.0 h1:JEkYlQnpzrzQFxi6gnukFPdQ+ac82oRhzMcIduJu/Ug=
|
||||
github.com/prometheus/common v0.30.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
|
||||
github.com/prometheus/exporter-toolkit v0.5.1/go.mod h1:OCkM4805mmisBhLmVFw858QYi3v0wKdY6/UxrT0pZVg=
|
||||
github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4=
|
||||
github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI=
|
||||
github.com/prometheus/exporter-toolkit v0.6.1 h1:Aqk75wQD92N9CqmTlZwjKwq6272nOGrWIbc8Z7+xQO0=
|
||||
github.com/prometheus/exporter-toolkit v0.6.1/go.mod h1:ZUBIj498ePooX9t/2xtDjeQYwvRpiPP2lh5u4iblj2g=
|
||||
github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
|
@ -1149,7 +1169,7 @@ github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR
|
|||
github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||
github.com/rs/cors v1.6.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
|
||||
github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
|
||||
github.com/rs/cors v1.8.0/go.mod h1:EBwu+T5AvHOcXwvZIkQFjUN6s8Czyqw12GL/Y0tUyRM=
|
||||
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
|
||||
github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
|
||||
|
@ -1209,6 +1229,7 @@ github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
|
|||
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
|
||||
github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8=
|
||||
github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
|
||||
github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
|
||||
github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
|
||||
github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI=
|
||||
|
@ -1247,6 +1268,8 @@ github.com/uber/jaeger-lib v2.2.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6
|
|||
github.com/uber/jaeger-lib v2.4.1+incompatible h1:td4jdvLcExb4cBISKIpHuGoVXh+dVKhn2Um6rjCsSsg=
|
||||
github.com/uber/jaeger-lib v2.4.1+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U=
|
||||
github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
|
||||
github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw=
|
||||
github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY=
|
||||
github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
|
||||
github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
|
||||
github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
|
||||
|
@ -1361,7 +1384,6 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh
|
|||
golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20201208171446-5f87f3452ae9/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
|
||||
golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
|
||||
golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
|
||||
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
|
||||
|
@ -1451,6 +1473,7 @@ golang.org/x/net v0.0.0-20200421231249-e086a090c8fd/go.mod h1:qpuaurCH72eLCgpAm/
|
|||
golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
|
@ -1464,16 +1487,16 @@ golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwY
|
|||
golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20210224082022-3d97a244fca7/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
|
||||
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
|
||||
golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM=
|
||||
golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985 h1:4CSI6oo7cOjJKajidEljs9h+uP0rRZBPPPhcCbj5mw8=
|
||||
golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20210903162142-ad29c8ab022f h1:w6wWR0H+nyVpbSAQbzVEIACVyr/h8l/BEkY6Sokc7Eg=
|
||||
golang.org/x/net v0.0.0-20210903162142-ad29c8ab022f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
|
@ -1486,8 +1509,10 @@ golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ
|
|||
golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914 h1:3B43BWw0xEBsLZ/NO1VALz6fppU3481pik+2Ksv45z8=
|
||||
golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f h1:Qmd2pbz05z7z6lm0DrgQVVPuBm92jqujBKMHMOlOQEw=
|
||||
golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
|
@ -1569,6 +1594,7 @@ golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7w
|
|||
golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
|
@ -1593,22 +1619,22 @@ golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7w
|
|||
golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210309074719-68d13333faf2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210420072515-93ed5bcd2bfe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c h1:F1jZWGFhYfh0Ci55sIpILtKKK8p3i2/krTr0H1rg74I=
|
||||
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34 h1:GkvMjFtXUmahfDtashnc1mnrCtuBVcwse5QV2lUk/tI=
|
||||
golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d h1:SZxvLBoTP5yHO3Frd4z4vrF+DBX9vMVanchswa69toE=
|
||||
|
@ -1751,8 +1777,10 @@ google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk
|
|||
google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo=
|
||||
google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4=
|
||||
google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw=
|
||||
google.golang.org/api v0.51.0 h1:SQaA2Cx57B+iPw2MBgyjEkoeMkRK2IenSGoia0U3lCk=
|
||||
google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU=
|
||||
google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k=
|
||||
google.golang.org/api v0.56.0 h1:08F9XVYTLOGeSQb3xI9C0gXMuQanhdGed0cWFhDozbI=
|
||||
google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
|
@ -1799,6 +1827,7 @@ google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEY
|
|||
google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
|
||||
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
|
@ -1815,8 +1844,14 @@ google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxH
|
|||
google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
|
||||
google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24=
|
||||
google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k=
|
||||
google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea h1:8ZyCcgugUqamxp/vZSEJw9CMy7VZlSWYJLLJPi/dSDA=
|
||||
google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k=
|
||||
google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48=
|
||||
google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48=
|
||||
google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w=
|
||||
google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
|
||||
google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
|
||||
google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83 h1:3V2dxSZpz4zozWWUq36vUxXEKnSYitEH2LdsAx+RUmg=
|
||||
google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
|
||||
google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
|
||||
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
|
@ -1847,8 +1882,10 @@ google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAG
|
|||
google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
|
||||
google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
|
||||
google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
|
||||
google.golang.org/grpc v1.39.0 h1:Klz8I9kdtkIN6EpHHUOMLCYhTn/2WAe5a0s1hcBkdTI=
|
||||
google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
|
||||
google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
|
||||
google.golang.org/grpc v1.40.0 h1:AGJ0Ih4mHjSeibYkFGh1dD9KJ/eOtZ93I6hoHhukQ5Q=
|
||||
google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
|
||||
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||
|
@ -1880,6 +1917,8 @@ gopkg.in/fsnotify/fsnotify.v1 v1.4.7 h1:XNNYLJHt73EyYiCZi6+xjupS9CpvmiDgjPTAjrBl
|
|||
gopkg.in/fsnotify/fsnotify.v1 v1.4.7/go.mod h1:Fyux9zXlo4rWoMSIzpn9fDAYjalPqJ/K1qJ27s+7ltE=
|
||||
gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o=
|
||||
gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo=
|
||||
gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE=
|
||||
gopkg.in/go-playground/validator.v9 v9.29.1/go.mod h1:+c9/zcJMFNgbLvly1L1V+PpxWdVbfP1avr/N00E2vyQ=
|
||||
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
|
||||
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
|
||||
|
@ -1923,14 +1962,14 @@ k8s.io/api v0.17.5/go.mod h1:0zV5/ungglgy2Rlm3QK8fbxkXVs+BSJWpJP/+8gUVLY=
|
|||
k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo=
|
||||
k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ=
|
||||
k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8=
|
||||
k8s.io/api v0.21.3 h1:cblWILbLO8ar+Fj6xdDGr603HRsf8Wu9E9rngJeprZQ=
|
||||
k8s.io/api v0.21.3/go.mod h1:hUgeYHUbBp23Ue4qdX9tR8/ANi/g3ehylAqDn9NWVOg=
|
||||
k8s.io/api v0.22.1 h1:ISu3tD/jRhYfSW8jI/Q1e+lRxkR7w9UwQEZ7FgslrwY=
|
||||
k8s.io/api v0.22.1/go.mod h1:bh13rkTp3F1XEaLGykbyRD2QaTTzPm0e/BMd8ptFONY=
|
||||
k8s.io/apimachinery v0.17.5/go.mod h1:ioIo1G/a+uONV7Tv+ZmCbMG1/a3kVw5YcDdncd8ugQ0=
|
||||
k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
|
||||
k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
|
||||
k8s.io/apimachinery v0.20.6/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc=
|
||||
k8s.io/apimachinery v0.21.3 h1:3Ju4nvjCngxxMYby0BimUk+pQHPOQp3eCGChk5kfVII=
|
||||
k8s.io/apimachinery v0.21.3/go.mod h1:H/IM+5vH9kZRNJ4l3x/fXP/5bOPJaVP/guptnZPeCFI=
|
||||
k8s.io/apimachinery v0.22.1 h1:DTARnyzmdHMz7bFWFDDm22AM4pLWTQECMpRTFu2d2OM=
|
||||
k8s.io/apimachinery v0.22.1/go.mod h1:O3oNtNadZdeOMxHFVxOreoznohCpy0z6mocxbZr7oJ0=
|
||||
k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU=
|
||||
k8s.io/apiserver v0.20.4/go.mod h1:Mc80thBKOyy7tbvFtB4kJv1kbdD0eIH8k8vianJcbFM=
|
||||
k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q=
|
||||
|
@ -1938,8 +1977,8 @@ k8s.io/client-go v0.17.5/go.mod h1:S8uZpBpjJJdEH/fEyxcqg7Rn0P5jH+ilkgBHjriSmNo=
|
|||
k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y=
|
||||
k8s.io/client-go v0.20.4/go.mod h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k=
|
||||
k8s.io/client-go v0.20.6/go.mod h1:nNQMnOvEUEsOzRRFIIkdmYOjAZrC8bgq0ExboWSU1I0=
|
||||
k8s.io/client-go v0.21.3 h1:J9nxZTOmvkInRDCzcSNQmPJbDYN/PjlxXT9Mos3HcLg=
|
||||
k8s.io/client-go v0.21.3/go.mod h1:+VPhCgTsaFmGILxR/7E1N0S+ryO010QBeNCv5JwRGYU=
|
||||
k8s.io/client-go v0.22.1 h1:jW0ZSHi8wW260FvcXHkIa0NLxFBQszTlhiAVsU5mopw=
|
||||
k8s.io/client-go v0.22.1/go.mod h1:BquC5A4UOo4qVDUtoc04/+Nxp1MeHcVc1HJm1KmG8kk=
|
||||
k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk=
|
||||
k8s.io/component-base v0.20.4/go.mod h1:t4p9EdiagbVCJKrQ1RsA5/V4rFQNDfRlevJajlGwgjI=
|
||||
k8s.io/component-base v0.20.6/go.mod h1:6f1MPBAeI+mvuts3sIdtpjljHWBQ2cIy38oBIWMYnrM=
|
||||
|
@ -1951,13 +1990,14 @@ k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8
|
|||
k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
|
||||
k8s.io/kube-openapi v0.0.0-20200316234421-82d701f24f9d/go.mod h1:F+5wygcW0wmRTnM3cOgIqGivxkwSWIWT5YdsDbeAOaU=
|
||||
k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM=
|
||||
k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7 h1:vEx13qjvaZ4yfObSSXW7BrMc/KQBBT/Jyee8XtLf4x0=
|
||||
k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7/go.mod h1:wXW5VT87nVfh/iLV8FpR2uDvrFyomxbtb1KivDbvPTE=
|
||||
k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e h1:KLHHjkdQFomZy8+06csTWZ0m1343QqxZhR2LJ1OxCYM=
|
||||
k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw=
|
||||
k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk=
|
||||
k8s.io/utils v0.0.0-20191114184206-e782cd3c129f/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
|
||||
k8s.io/utils v0.0.0-20200414100711-2df71ebbae66/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
|
||||
k8s.io/utils v0.0.0-20201110183641-67b214c5f920 h1:CbnUZsM497iRC5QMVkHwyl8s2tB3g7yaSHkYPkpgelw=
|
||||
k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
|
||||
k8s.io/utils v0.0.0-20210707171843-4b05e18ac7d9 h1:imL9YgXQ9p7xmPzHFm/vVd/cF78jad+n4wK1ABwYtMM=
|
||||
k8s.io/utils v0.0.0-20210707171843-4b05e18ac7d9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
|
||||
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
|
||||
rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
|
||||
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
|
||||
|
|
|
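Note on the go.sum hunks above: every module version is normally pinned by two checksums, an h1: hash over the module's whole file tree and a second entry suffixed /go.mod that covers only its go.mod file, which is why each dependency bump touches lines in pairs. A minimal sketch (not part of the commit) that splits one such entry into its fields:

package main

import (
	"fmt"
	"strings"
)

// parseGoSumLine splits a go.sum entry into module path, version
// (possibly carrying a "/go.mod" suffix), and checksum.
func parseGoSumLine(line string) (path, version, sum string, ok bool) {
	fields := strings.Fields(line)
	if len(fields) != 3 {
		return "", "", "", false
	}
	return fields[0], fields[1], fields[2], true
}

func main() {
	line := "github.com/docker/docker v20.10.8+incompatible h1:RVqD337BgQicVCzYrrlhLDWhq6OAD2PJDUg2LsEUvKM="
	path, version, sum, ok := parseGoSumLine(line)
	fmt.Println(ok, path, version, sum)
}

Running go mod verify recomputes these hashes for the modules in the local cache and reports any mismatch.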
@ -392,7 +392,7 @@ func TestHandlerQueuing(t *testing.T) {
			require.NoError(t, err)
			return
		case <-time.After(5 * time.Second):
			t.Fatalf("Alerts were not pushed")
			require.FailNow(t, "Alerts were not pushed.")
		}
	}
}

@ -424,7 +424,7 @@ func TestHandlerQueuing(t *testing.T) {
	case err := <-errc:
		require.NoError(t, err)
	case <-time.After(5 * time.Second):
		t.Fatalf("Alerts were not pushed")
		require.FailNow(t, "Alerts were not pushed.")
	}

	// Verify that we receive the last 3 batches.

@ -480,14 +480,12 @@ alerting:
  alertmanagers:
  - static_configs:
`
	if err := yaml.UnmarshalStrict([]byte(s), cfg); err != nil {
		t.Fatalf("Unable to load YAML config: %s", err)
	}
	err := yaml.UnmarshalStrict([]byte(s), cfg)
	require.NoError(t, err, "Unable to load YAML config.")
	require.Equal(t, 1, len(cfg.AlertingConfig.AlertmanagerConfigs))

	if err := n.ApplyConfig(cfg); err != nil {
		t.Fatalf("Error Applying the config:%v", err)
	}
	err = n.ApplyConfig(cfg)
	require.NoError(t, err, "Error applying the config.")

	tgs := make(map[string][]*targetgroup.Group)
	for _, tt := range tests {

@ -534,14 +532,12 @@ alerting:
      regex: 'alertmanager:9093'
      action: drop
`
	if err := yaml.UnmarshalStrict([]byte(s), cfg); err != nil {
		t.Fatalf("Unable to load YAML config: %s", err)
	}
	err := yaml.UnmarshalStrict([]byte(s), cfg)
	require.NoError(t, err, "Unable to load YAML config.")
	require.Equal(t, 1, len(cfg.AlertingConfig.AlertmanagerConfigs))

	if err := n.ApplyConfig(cfg); err != nil {
		t.Fatalf("Error Applying the config:%v", err)
	}
	err = n.ApplyConfig(cfg)
	require.NoError(t, err, "Error applying the config.")

	tgs := make(map[string][]*targetgroup.Group)
	for _, tt := range tests {
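The notifier test hunks above replace hand-rolled "if err != nil { t.Fatalf(...) }" blocks with testify's require helpers, which abort the test on failure and accept an optional message. A self-contained sketch of the same idiom; the config struct here is a made-up stand-in, not Prometheus's real configuration type:

package notifier_test

import (
	"testing"

	"github.com/stretchr/testify/require"
	"gopkg.in/yaml.v2"
)

// config is a hypothetical stand-in used only for this sketch.
type config struct {
	Global struct {
		ScrapeInterval string `yaml:"scrape_interval"`
	} `yaml:"global"`
}

func TestUnmarshalStrict(t *testing.T) {
	var cfg config
	// UnmarshalStrict also rejects fields that are not declared on the
	// target struct, which is why configuration tests favor it.
	err := yaml.UnmarshalStrict([]byte("global:\n  scrape_interval: 15s\n"), &cfg)
	require.NoError(t, err, "Unable to load YAML config.")
	require.Equal(t, "15s", cfg.Global.ScrapeInterval)
}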
@ -100,6 +100,9 @@ func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error {
	if c.Regex.Regexp == nil {
		c.Regex = MustNewRegexp("")
	}
	if c.Action == "" {
		return errors.Errorf("relabel action cannot be empty")
	}
	if c.Modulus == 0 && c.Action == HashMod {
		return errors.Errorf("relabel configuration for hashmod requires non-zero modulus")
	}
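The relabel hunk above tightens validation at YAML unmarshal time: a relabel rule must name an action, and a hashmod action only makes sense with a non-zero modulus. A simplified, standalone sketch of the same guards; relabelConfig here is a toy stand-in for the real relabel.Config:

package main

import (
	"errors"
	"fmt"
)

type relabelConfig struct {
	Action  string
	Modulus uint64
}

// validate mirrors the two guards added in the hunk above.
func (c relabelConfig) validate() error {
	if c.Action == "" {
		return errors.New("relabel action cannot be empty")
	}
	if c.Action == "hashmod" && c.Modulus == 0 {
		return errors.New("relabel configuration for hashmod requires non-zero modulus")
	}
	return nil
}

func main() {
	fmt.Println(relabelConfig{Action: "hashmod"}.validate())             // modulus error
	fmt.Println(relabelConfig{Action: "hashmod", Modulus: 8}.validate()) // <nil>
}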
@ -239,6 +239,7 @@ func testTemplateParsing(rl *RuleNode) (errs []error) {
		model.Time(timestamp.FromTime(time.Now())),
		nil,
		nil,
		nil,
	)
	return tmpl.ParseTest()
}
@ -15,19 +15,14 @@ package rulefmt

import (
	"path/filepath"
	"strings"
	"testing"

	"github.com/stretchr/testify/require"
)

func TestParseFileSuccess(t *testing.T) {
	if _, errs := ParseFile("testdata/test.yaml"); len(errs) > 0 {
		t.Errorf("unexpected errors parsing file")
		for _, err := range errs {
			t.Error(err)
		}
	}
	_, errs := ParseFile("testdata/test.yaml")
	require.Empty(t, errs, "unexpected errors parsing file")
}

func TestParseFileFailure(t *testing.T) {

@ -79,15 +74,9 @@ func TestParseFileFailure(t *testing.T) {

	for _, c := range table {
		_, errs := ParseFile(filepath.Join("testdata", c.filename))
		if errs == nil {
			t.Errorf("Expected error parsing %s but got none", c.filename)
			continue
		}
		if !strings.Contains(errs[0].Error(), c.errMsg) {
			t.Errorf("Expected error for %s to contain %q but got: %s", c.filename, c.errMsg, errs)
		}
		require.NotNil(t, errs, "Expected error parsing %s but got none", c.filename)
		require.Error(t, errs[0], c.errMsg, "Expected error for %s.", c.filename)
	}

}

func TestTemplateParsing(t *testing.T) {
@ -11,6 +11,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build !windows
// +build !windows

package runtime

@ -11,6 +11,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build windows
// +build windows

package runtime

@ -11,6 +11,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build openbsd || windows || netbsd || solaris
// +build openbsd windows netbsd solaris

package runtime

@ -11,8 +11,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.

// +build !windows,!openbsd,!netbsd,!solaris
// +build !386
//go:build !windows && !openbsd && !netbsd && !solaris && !386
// +build !windows,!openbsd,!netbsd,!solaris,!386

package runtime

@ -11,6 +11,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build linux && 386
// +build linux,386

package runtime

@ -11,6 +11,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build (386 && darwin) || (386 && freebsd)
// +build 386,darwin 386,freebsd

package runtime

@ -11,6 +11,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build !linux
// +build !linux

package runtime

@ -11,8 +11,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.

// +build !windows
// +build !openbsd
//go:build !windows && !openbsd
// +build !windows,!openbsd

package runtime

@ -11,6 +11,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build openbsd
// +build openbsd

package runtime
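The runtime hunks above are the Go 1.17 build-constraint migration: gofmt now emits a //go:build line with ordinary boolean syntax and keeps a matching legacy // +build line for older toolchains, and files that previously stacked two // +build lines get one combined expression. A minimal sketch of the resulting header:

// Sketch of the dual-constraint header the hunks above introduce.
// Go 1.17+ reads the //go:build expression; older toolchains fall back
// to the legacy // +build line, which must select the same file set.
// On // +build lines a comma means AND and a space means OR, so
// "linux,!386" is equivalent to "linux && !386".

//go:build linux && !386
// +build linux,!386

package runtime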
@ -306,11 +306,10 @@ func (p *OpenMetricsParser) Next() (Entry, error) {

		t2 := p.nextToken()
		if t2 == tBraceOpen {
			offsets, err := p.parseLVals()
			p.offsets, err = p.parseLVals(p.offsets)
			if err != nil {
				return EntryInvalid, err
			}
			p.offsets = append(p.offsets, offsets...)
			p.series = p.l.b[p.start:p.l.i]
			t2 = p.nextToken()
		}

@ -367,12 +366,12 @@ func (p *OpenMetricsParser) parseComment() error {
		return err
	}

	var err error
	// Parse the labels.
	offsets, err := p.parseLVals()
	p.eOffsets, err = p.parseLVals(p.eOffsets)
	if err != nil {
		return err
	}
	p.eOffsets = append(p.eOffsets, offsets...)
	p.exemplar = p.l.b[p.start:p.l.i]

	// Get the value.

@ -410,8 +409,7 @@ func (p *OpenMetricsParser) parseComment() error {
	return nil
}

func (p *OpenMetricsParser) parseLVals() ([]int, error) {
	var offsets []int
func (p *OpenMetricsParser) parseLVals(offsets []int) ([]int, error) {
	first := true
	for {
		t := p.nextToken()
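parseLVals now takes the destination slice as an argument and returns it, so the parser appends label offsets into its existing backing array instead of building a fresh slice per call and copying it over; once the array has grown large enough, later calls allocate nothing. A tiny sketch of the pattern, independent of the parser's types:

package main

import "fmt"

// appendOffsets appends into the caller's slice and hands it back, the
// same shape as the parseLVals signature above. Callers store the
// returned slice so growth is preserved across calls.
func appendOffsets(offsets []int, start, end int) []int {
	return append(offsets, start, end)
}

func main() {
	var offsets []int
	offsets = appendOffsets(offsets, 0, 5)
	offsets = appendOffsets(offsets, 6, 11)
	fmt.Println(offsets) // [0 5 6 11]
}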
@ -72,7 +72,7 @@ func TestQueryConcurrency(t *testing.T) {
		case <-processing:
			// Expected.
		case <-time.After(20 * time.Millisecond):
			t.Fatalf("Query within concurrency threshold not being executed")
			require.Fail(t, "Query within concurrency threshold not being executed")
		}
	}

@ -81,7 +81,7 @@ func TestQueryConcurrency(t *testing.T) {

	select {
	case <-processing:
		t.Fatalf("Query above concurrency threshold being executed")
		require.Fail(t, "Query above concurrency threshold being executed")
	case <-time.After(20 * time.Millisecond):
		// Expected.
	}

@ -93,7 +93,7 @@ func TestQueryConcurrency(t *testing.T) {
	case <-processing:
		// Expected.
	case <-time.After(20 * time.Millisecond):
		t.Fatalf("Query within concurrency threshold not being executed")
		require.Fail(t, "Query within concurrency threshold not being executed")
	}

	// Terminate remaining queries.

@ -604,7 +604,7 @@ func TestEngineShutdown(t *testing.T) {
	require.Equal(t, errQueryCanceled, res.Err)

	query2 := engine.newTestQuery(func(context.Context) error {
		t.Fatalf("reached query execution unexpectedly")
		require.FailNow(t, "reached query execution unexpectedly")
		return nil
	})

@ -1121,9 +1121,7 @@ func TestRecoverEvaluatorRuntime(t *testing.T) {
	//nolint:govet
	a[123] = 1

	if err.Error() != "unexpected error" {
		t.Fatalf("wrong error message: %q, expected %q", err, "unexpected error")
	}
	require.EqualError(t, err, "unexpected error")
}

func TestRecoverEvaluatorError(t *testing.T) {

@ -1133,9 +1131,7 @@ func TestRecoverEvaluatorError(t *testing.T) {
	e := errors.New("custom error")

	defer func() {
		if err.Error() != e.Error() {
			t.Fatalf("wrong error message: %q, expected %q", err, e)
		}
		require.EqualError(t, err, e.Error())
	}()
	defer ev.recover(nil, &err)

@ -1154,12 +1150,8 @@ func TestRecoverEvaluatorErrorWithWarnings(t *testing.T) {
	}

	defer func() {
		if err.Error() != e.Error() {
			t.Fatalf("wrong error message: %q, expected %q", err, e)
		}
		if len(ws) != len(warnings) && ws[0] != warnings[0] {
			t.Fatalf("wrong warning message: %q, expected %q", ws[0], warnings[0])
		}
		require.EqualError(t, err, e.Error())
		require.Equal(t, warnings, ws, "wrong warning message")
	}()
	defer ev.recover(&ws, &err)
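The engine test hunks above also fold the select-with-deadline idiom into testify: wait on a channel and fail via require.Fail if the signal never arrives. A minimal sketch of that idiom in isolation:

package promql_test

import (
	"testing"
	"time"

	"github.com/stretchr/testify/require"
)

func TestTimeoutPattern(t *testing.T) {
	done := make(chan struct{})
	go func() { close(done) }()

	select {
	case <-done:
		// Expected.
	case <-time.After(20 * time.Millisecond):
		require.Fail(t, "signal not received within deadline")
	}
}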
@ -12,6 +12,7 @@
// limitations under the License.

// Only build when go-fuzz is in use
//go:build gofuzz
// +build gofuzz

package promql
@ -737,13 +737,13 @@ func TestLexer(t *testing.T) {
			}
			if !hasError {
				t.Logf("%d: input %q", i, test.input)
				t.Fatalf("expected lexing error but did not fail")
				require.Fail(t, "expected lexing error but did not fail")
			}
			continue
		}
		if lastItem.Typ == ERROR {
			t.Logf("%d: input %q", i, test.input)
			t.Fatalf("unexpected lexing error at position %d: %s", lastItem.Pos, lastItem)
			require.Fail(t, "unexpected lexing error at position %d: %s", lastItem.Pos, lastItem)
		}

		eofItem := Item{EOF, Pos(len(test.input)), ""}
@ -182,7 +182,10 @@ func (node *UnaryExpr) String() string {
}

func (node *VectorSelector) String() string {
	labelStrings := make([]string, 0, len(node.LabelMatchers)-1)
	var labelStrings []string
	if len(node.LabelMatchers) > 1 {
		labelStrings = make([]string, 0, len(node.LabelMatchers)-1)
	}
	for _, matcher := range node.LabelMatchers {
		// Only include the __name__ label if its equality matching and matches the name.
		if matcher.Name == labels.MetricName && matcher.Type == labels.MatchEqual && matcher.Value == node.Name {
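The VectorSelector.String change above is a crash fix as much as a refactor: with no label matchers, len(node.LabelMatchers)-1 evaluates to -1, and make with a negative capacity panics at run time. Guarding the make behind len > 1 leaves labelStrings as a nil slice, which append handles fine. A small demonstration of the hazard and the guard:

package main

import "fmt"

func main() {
	var matchers []string // imagine node.LabelMatchers is empty

	// Guarded form from the hunk above: only pre-size when there is
	// more than one matcher; a nil slice is otherwise fine.
	var labelStrings []string
	if len(matchers) > 1 {
		labelStrings = make([]string, 0, len(matchers)-1)
	}
	fmt.Println(len(labelStrings), cap(labelStrings)) // 0 0

	// The unguarded form panics when matchers is empty, because the
	// capacity expression evaluates to -1 at run time:
	// _ = make([]string, 0, len(matchers)-1) // panic: makeslice: cap out of range
}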
@ -16,6 +16,8 @@ package parser
 import (
 	"testing"

+	"github.com/prometheus/prometheus/pkg/labels"
+
 	"github.com/stretchr/testify/require"
 )

@ -138,3 +140,76 @@ func TestExprString(t *testing.T) {
 		require.Equal(t, exp, expr.String())
 	}
 }
+
+func TestVectorSelector_String(t *testing.T) {
+	for _, tc := range []struct {
+		name     string
+		vs       VectorSelector
+		expected string
+	}{
+		{
+			name:     "empty value",
+			vs:       VectorSelector{},
+			expected: ``,
+		},
+		{
+			name:     "no matchers with name",
+			vs:       VectorSelector{Name: "foobar"},
+			expected: `foobar`,
+		},
+		{
+			name: "one matcher with name",
+			vs: VectorSelector{
+				Name: "foobar",
+				LabelMatchers: []*labels.Matcher{
+					labels.MustNewMatcher(labels.MatchEqual, "a", "x"),
+				},
+			},
+			expected: `foobar{a="x"}`,
+		},
+		{
+			name: "two matchers with name",
+			vs: VectorSelector{
+				Name: "foobar",
+				LabelMatchers: []*labels.Matcher{
+					labels.MustNewMatcher(labels.MatchEqual, "a", "x"),
+					labels.MustNewMatcher(labels.MatchEqual, "b", "y"),
+				},
+			},
+			expected: `foobar{a="x",b="y"}`,
+		},
+		{
+			name: "two matchers without name",
+			vs: VectorSelector{
+				LabelMatchers: []*labels.Matcher{
+					labels.MustNewMatcher(labels.MatchEqual, "a", "x"),
+					labels.MustNewMatcher(labels.MatchEqual, "b", "y"),
+				},
+			},
+			expected: `{a="x",b="y"}`,
+		},
+		{
+			name: "name matcher and name",
+			vs: VectorSelector{
+				Name: "foobar",
+				LabelMatchers: []*labels.Matcher{
+					labels.MustNewMatcher(labels.MatchEqual, labels.MetricName, "foobar"),
+				},
+			},
+			expected: `foobar`,
+		},
+		{
+			name: "name matcher only",
+			vs: VectorSelector{
+				LabelMatchers: []*labels.Matcher{
+					labels.MustNewMatcher(labels.MatchEqual, labels.MetricName, "foobar"),
+				},
+			},
+			expected: `{__name__="foobar"}`,
+		},
+	} {
+		t.Run(tc.name, func(t *testing.T) {
+			require.Equal(t, tc.expected, tc.vs.String())
+		})
+	}
+}

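For orientation, the behaviour this new table test pins down, as a tiny hypothetical program (import paths as used elsewhere in this commit):

    package main

    import (
    	"fmt"

    	"github.com/prometheus/prometheus/pkg/labels"
    	"github.com/prometheus/prometheus/promql/parser"
    )

    func main() {
    	vs := parser.VectorSelector{
    		Name: "foobar",
    		LabelMatchers: []*labels.Matcher{
    			labels.MustNewMatcher(labels.MatchEqual, "a", "x"),
    		},
    	}
    	// The __name__ matcher is folded into the metric name when it matches.
    	fmt.Println(vs.String()) // foobar{a="x"}
    }
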
@ -55,18 +55,16 @@ func TestQueryLogging(t *testing.T) {
 		queryLogger.Insert(context.Background(), queries[i])

 		have := string(fileAsBytes[start:end])
-		if !regexp.MustCompile(want[i]).MatchString(have) {
-			t.Fatalf("Query not written correctly: %s.\nHave %s\nWant %s", queries[i], have, want[i])
-		}
+		require.True(t, regexp.MustCompile(want[i]).MatchString(have),
+			"Query not written correctly: %s", queries[i])
 	}

 	// Check if all queries have been deleted.
 	for i := 0; i < 4; i++ {
 		queryLogger.Delete(1 + i*entrySize)
 	}
-	if !regexp.MustCompile(`^\x00+$`).Match(fileAsBytes[1 : 1+entrySize*4]) {
-		t.Fatalf("All queries not deleted properly. Have %s\nWant only null bytes \\x00", string(fileAsBytes[1:1+entrySize*4]))
-	}
+	require.True(t, regexp.MustCompile(`^\x00+$`).Match(fileAsBytes[1:1+entrySize*4]),
+		"All queries not deleted properly. Want only null bytes \\x00")
 }

 func TestIndexReuse(t *testing.T) {
@ -101,9 +99,8 @@ func TestIndexReuse(t *testing.T) {
 		end := start + entrySize

 		have := queryBytes[start:end]
-		if !regexp.MustCompile(want[i]).Match(have) {
-			t.Fatalf("Index not reused properly:\nHave %s\nWant %s", string(queryBytes[start:end]), want[i])
-		}
+		require.True(t, regexp.MustCompile(want[i]).Match(have),
+			"Index not reused properly.")
 	}
 }

@ -124,14 +121,10 @@ func TestMMapFile(t *testing.T) {

 	bytes := make([]byte, 4)
 	n, err := f.Read(bytes)
+	require.Equal(t, n, 2)
+	require.NoError(t, err, "Unexpected error while reading file.")

-	if n != 2 || err != nil {
-		t.Fatalf("Error reading file")
-	}
-
-	if string(bytes[:2]) != string(fileAsBytes) {
-		t.Fatalf("Mmap failed")
-	}
+	require.Equal(t, fileAsBytes, bytes[:2], "Mmap failed")
 }

 func TestParseBrokenJSON(t *testing.T) {
@ -163,12 +156,9 @@ func TestParseBrokenJSON(t *testing.T) {
 	} {
 		t.Run("", func(t *testing.T) {
 			out, ok := parseBrokenJSON(tc.b)
-			if tc.ok != ok {
-				t.Fatalf("expected %t, got %t", tc.ok, ok)
-				return
-			}
-			if ok && tc.out != out {
-				t.Fatalf("expected %s, got %s", tc.out, out)
+			require.Equal(t, tc.ok, ok)
+			if ok {
+				require.Equal(t, tc.out, out)
 			}
 		})
 	}

@ -25,6 +25,7 @@ import (

 	"github.com/pkg/errors"
 	"github.com/prometheus/common/model"
+	"github.com/stretchr/testify/require"

 	"github.com/prometheus/prometheus/pkg/exemplar"
 	"github.com/prometheus/prometheus/pkg/labels"
@ -597,9 +598,8 @@ func (t *Test) exec(tc testCommand) error {
 // clear the current test storage of all inserted samples.
 func (t *Test) clear() {
 	if t.storage != nil {
-		if err := t.storage.Close(); err != nil {
-			t.T.Fatalf("closing test storage: %s", err)
-		}
+		err := t.storage.Close()
+		require.NoError(t.T, err, "Unexpected error while closing test storage.")
 	}
 	if t.cancelCtx != nil {
 		t.cancelCtx()
@ -623,9 +623,8 @@ func (t *Test) clear() {
 func (t *Test) Close() {
 	t.cancelCtx()

-	if err := t.storage.Close(); err != nil {
-		t.T.Fatalf("closing test storage: %s", err)
-	}
+	err := t.storage.Close()
+	require.NoError(t.T, err, "Unexpected error while closing test storage.")
 }

 // samplesAlmostEqual returns true if the two sample lines only differ by a
@ -722,9 +721,8 @@ func (ll *LazyLoader) parse(input string) error {
 // clear the current test storage of all inserted samples.
 func (ll *LazyLoader) clear() {
 	if ll.storage != nil {
-		if err := ll.storage.Close(); err != nil {
-			ll.T.Fatalf("closing test storage: %s", err)
-		}
+		err := ll.storage.Close()
+		require.NoError(ll.T, err, "Unexpected error while closing test storage.")
 	}
 	if ll.cancelCtx != nil {
 		ll.cancelCtx()
@ -798,8 +796,6 @@ func (ll *LazyLoader) Storage() storage.Storage {
 // Close closes resources associated with the LazyLoader.
 func (ll *LazyLoader) Close() {
 	ll.cancelCtx()

-	if err := ll.storage.Close(); err != nil {
-		ll.T.Fatalf("closing test storage: %s", err)
-	}
+	err := ll.storage.Close()
+	require.NoError(ll.T, err, "Unexpected error while closing test storage.")
 }

@ -338,6 +338,7 @@ func (r *AlertingRule) Eval(ctx context.Context, ts time.Time, query QueryFunc,
 			model.Time(timestamp.FromTime(ts)),
 			template.QueryFunc(query),
 			externalURL,
+			nil,
 		)
 		result, err := tmpl.Expand()
 		if err != nil {

@ -99,12 +99,16 @@ func (mc *MetadataMetricsCollector) Collect(ch chan<- prometheus.Metric) {
 }

 // NewManager is the Manager constructor
-func NewManager(logger log.Logger, app storage.Appendable) *Manager {
+func NewManager(o *Options, logger log.Logger, app storage.Appendable) *Manager {
+	if o == nil {
+		o = &Options{}
+	}
 	if logger == nil {
 		logger = log.NewNopLogger()
 	}
 	m := &Manager{
 		append:        app,
+		opts:          o,
 		logger:        logger,
 		scrapeConfigs: make(map[string]*config.ScrapeConfig),
 		scrapePools:   make(map[string]*scrapePool),
@ -116,9 +120,15 @@ func NewManager(logger log.Logger, app storage.Appendable) *Manager {
 	return m
 }

+// Options are the configuration parameters to the scrape manager.
+type Options struct {
+	ExtraMetrics bool
+}
+
 // Manager maintains a set of scrape pools and manages start/stop cycles
 // when receiving new target groups from the discovery manager.
 type Manager struct {
+	opts      *Options
 	logger    log.Logger
 	append    storage.Appendable
 	graceShut chan struct{}
@ -181,7 +191,7 @@ func (m *Manager) reload() {
 			level.Error(m.logger).Log("msg", "error reloading target set", "err", "invalid config id:"+setName)
 			continue
 		}
-		sp, err := newScrapePool(scrapeConfig, m.append, m.jitterSeed, log.With(m.logger, "scrape_pool", setName))
+		sp, err := newScrapePool(scrapeConfig, m.append, m.jitterSeed, log.With(m.logger, "scrape_pool", setName), m.opts.ExtraMetrics)
 		if err != nil {
 			level.Error(m.logger).Log("msg", "error creating new scrape pool", "err", err, "scrape_pool", setName)
 			continue

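A hypothetical call site for the new constructor signature (the nil logger and appendable are placeholders; per the hunk above, a nil Options is also tolerated and replaced with the zero value):

    package main

    import "github.com/prometheus/prometheus/scrape"

    func main() {
    	// ExtraMetrics gates the extra per-scrape report series introduced
    	// later in this commit (scrape_timeout_seconds, scrape_sample_limit).
    	opts := &scrape.Options{ExtraMetrics: false}
    	mgr := scrape.NewManager(opts, nil, nil)
    	_ = mgr
    }
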
@ -44,52 +44,66 @@ func TestPopulateLabels(t *testing.T) {
 				"custom":           "value",
 			}),
 			cfg: &config.ScrapeConfig{
-				Scheme:      "https",
-				MetricsPath: "/metrics",
-				JobName:     "job",
+				Scheme:         "https",
+				MetricsPath:    "/metrics",
+				JobName:        "job",
+				ScrapeInterval: model.Duration(time.Second),
+				ScrapeTimeout:  model.Duration(time.Second),
 			},
 			res: labels.FromMap(map[string]string{
-				model.AddressLabel:     "1.2.3.4:1000",
-				model.InstanceLabel:    "1.2.3.4:1000",
-				model.SchemeLabel:      "https",
-				model.MetricsPathLabel: "/metrics",
-				model.JobLabel:         "job",
-				"custom":               "value",
+				model.AddressLabel:        "1.2.3.4:1000",
+				model.InstanceLabel:       "1.2.3.4:1000",
+				model.SchemeLabel:         "https",
+				model.MetricsPathLabel:    "/metrics",
+				model.JobLabel:            "job",
+				model.ScrapeIntervalLabel: "1s",
+				model.ScrapeTimeoutLabel:  "1s",
+				"custom":                  "value",
 			}),
 			resOrig: labels.FromMap(map[string]string{
-				model.AddressLabel:     "1.2.3.4:1000",
-				model.SchemeLabel:      "https",
-				model.MetricsPathLabel: "/metrics",
-				model.JobLabel:         "job",
-				"custom":               "value",
+				model.AddressLabel:        "1.2.3.4:1000",
+				model.SchemeLabel:         "https",
+				model.MetricsPathLabel:    "/metrics",
+				model.JobLabel:            "job",
+				"custom":                  "value",
+				model.ScrapeIntervalLabel: "1s",
+				model.ScrapeTimeoutLabel:  "1s",
 			}),
 		},
 		// Pre-define/overwrite scrape config labels.
 		// Leave out port and expect it to be defaulted to scheme.
 		{
 			in: labels.FromMap(map[string]string{
-				model.AddressLabel:     "1.2.3.4",
-				model.SchemeLabel:      "http",
-				model.MetricsPathLabel: "/custom",
-				model.JobLabel:         "custom-job",
+				model.AddressLabel:        "1.2.3.4",
+				model.SchemeLabel:         "http",
+				model.MetricsPathLabel:    "/custom",
+				model.JobLabel:            "custom-job",
+				model.ScrapeIntervalLabel: "2s",
+				model.ScrapeTimeoutLabel:  "2s",
 			}),
 			cfg: &config.ScrapeConfig{
-				Scheme:      "https",
-				MetricsPath: "/metrics",
-				JobName:     "job",
+				Scheme:         "https",
+				MetricsPath:    "/metrics",
+				JobName:        "job",
+				ScrapeInterval: model.Duration(time.Second),
+				ScrapeTimeout:  model.Duration(time.Second),
 			},
 			res: labels.FromMap(map[string]string{
-				model.AddressLabel:     "1.2.3.4:80",
-				model.InstanceLabel:    "1.2.3.4:80",
-				model.SchemeLabel:      "http",
-				model.MetricsPathLabel: "/custom",
-				model.JobLabel:         "custom-job",
+				model.AddressLabel:        "1.2.3.4:80",
+				model.InstanceLabel:       "1.2.3.4:80",
+				model.SchemeLabel:         "http",
+				model.MetricsPathLabel:    "/custom",
+				model.JobLabel:            "custom-job",
+				model.ScrapeIntervalLabel: "2s",
+				model.ScrapeTimeoutLabel:  "2s",
 			}),
 			resOrig: labels.FromMap(map[string]string{
-				model.AddressLabel:     "1.2.3.4",
-				model.SchemeLabel:      "http",
-				model.MetricsPathLabel: "/custom",
-				model.JobLabel:         "custom-job",
+				model.AddressLabel:        "1.2.3.4",
+				model.SchemeLabel:         "http",
+				model.MetricsPathLabel:    "/custom",
+				model.JobLabel:            "custom-job",
+				model.ScrapeIntervalLabel: "2s",
+				model.ScrapeTimeoutLabel:  "2s",
 			}),
 		},
 		// Provide instance label. HTTPS port default for IPv6.
@ -99,32 +113,40 @@ func TestPopulateLabels(t *testing.T) {
 				model.InstanceLabel: "custom-instance",
 			}),
 			cfg: &config.ScrapeConfig{
-				Scheme:      "https",
-				MetricsPath: "/metrics",
-				JobName:     "job",
+				Scheme:         "https",
+				MetricsPath:    "/metrics",
+				JobName:        "job",
+				ScrapeInterval: model.Duration(time.Second),
+				ScrapeTimeout:  model.Duration(time.Second),
 			},
 			res: labels.FromMap(map[string]string{
-				model.AddressLabel:     "[::1]:443",
-				model.InstanceLabel:    "custom-instance",
-				model.SchemeLabel:      "https",
-				model.MetricsPathLabel: "/metrics",
-				model.JobLabel:         "job",
+				model.AddressLabel:        "[::1]:443",
+				model.InstanceLabel:       "custom-instance",
+				model.SchemeLabel:         "https",
+				model.MetricsPathLabel:    "/metrics",
+				model.JobLabel:            "job",
+				model.ScrapeIntervalLabel: "1s",
+				model.ScrapeTimeoutLabel:  "1s",
 			}),
 			resOrig: labels.FromMap(map[string]string{
-				model.AddressLabel:     "[::1]",
-				model.InstanceLabel:    "custom-instance",
-				model.SchemeLabel:      "https",
-				model.MetricsPathLabel: "/metrics",
-				model.JobLabel:         "job",
+				model.AddressLabel:        "[::1]",
+				model.InstanceLabel:       "custom-instance",
+				model.SchemeLabel:         "https",
+				model.MetricsPathLabel:    "/metrics",
+				model.JobLabel:            "job",
+				model.ScrapeIntervalLabel: "1s",
+				model.ScrapeTimeoutLabel:  "1s",
 			}),
 		},
 		// Address label missing.
 		{
 			in: labels.FromStrings("custom", "value"),
 			cfg: &config.ScrapeConfig{
-				Scheme:      "https",
-				MetricsPath: "/metrics",
-				JobName:     "job",
+				Scheme:         "https",
+				MetricsPath:    "/metrics",
+				JobName:        "job",
+				ScrapeInterval: model.Duration(time.Second),
+				ScrapeTimeout:  model.Duration(time.Second),
 			},
 			res:     nil,
 			resOrig: nil,
@ -134,9 +156,11 @@ func TestPopulateLabels(t *testing.T) {
 		{
 			in: labels.FromStrings("custom", "host:1234"),
 			cfg: &config.ScrapeConfig{
-				Scheme:      "https",
-				MetricsPath: "/metrics",
-				JobName:     "job",
+				Scheme:         "https",
+				MetricsPath:    "/metrics",
+				JobName:        "job",
+				ScrapeInterval: model.Duration(time.Second),
+				ScrapeTimeout:  model.Duration(time.Second),
 				RelabelConfigs: []*relabel.Config{
 					{
 						Action: relabel.Replace,
@ -148,27 +172,33 @@ func TestPopulateLabels(t *testing.T) {
 				},
 			},
 			res: labels.FromMap(map[string]string{
-				model.AddressLabel:     "host:1234",
-				model.InstanceLabel:    "host:1234",
-				model.SchemeLabel:      "https",
-				model.MetricsPathLabel: "/metrics",
-				model.JobLabel:         "job",
-				"custom":               "host:1234",
+				model.AddressLabel:        "host:1234",
+				model.InstanceLabel:       "host:1234",
+				model.SchemeLabel:         "https",
+				model.MetricsPathLabel:    "/metrics",
+				model.JobLabel:            "job",
+				model.ScrapeIntervalLabel: "1s",
+				model.ScrapeTimeoutLabel:  "1s",
+				"custom":                  "host:1234",
 			}),
 			resOrig: labels.FromMap(map[string]string{
-				model.SchemeLabel:      "https",
-				model.MetricsPathLabel: "/metrics",
-				model.JobLabel:         "job",
-				"custom":               "host:1234",
+				model.SchemeLabel:         "https",
+				model.MetricsPathLabel:    "/metrics",
+				model.JobLabel:            "job",
+				model.ScrapeIntervalLabel: "1s",
+				model.ScrapeTimeoutLabel:  "1s",
+				"custom":                  "host:1234",
 			}),
 		},
 		// Address label missing, but added in relabelling.
 		{
 			in: labels.FromStrings("custom", "host:1234"),
 			cfg: &config.ScrapeConfig{
-				Scheme:      "https",
-				MetricsPath: "/metrics",
-				JobName:     "job",
+				Scheme:         "https",
+				MetricsPath:    "/metrics",
+				JobName:        "job",
+				ScrapeInterval: model.Duration(time.Second),
+				ScrapeTimeout:  model.Duration(time.Second),
 				RelabelConfigs: []*relabel.Config{
 					{
 						Action: relabel.Replace,
@ -180,18 +210,22 @@ func TestPopulateLabels(t *testing.T) {
 				},
 			},
 			res: labels.FromMap(map[string]string{
-				model.AddressLabel:     "host:1234",
-				model.InstanceLabel:    "host:1234",
-				model.SchemeLabel:      "https",
-				model.MetricsPathLabel: "/metrics",
-				model.JobLabel:         "job",
-				"custom":               "host:1234",
+				model.AddressLabel:        "host:1234",
+				model.InstanceLabel:       "host:1234",
+				model.SchemeLabel:         "https",
+				model.MetricsPathLabel:    "/metrics",
+				model.JobLabel:            "job",
+				model.ScrapeIntervalLabel: "1s",
+				model.ScrapeTimeoutLabel:  "1s",
+				"custom":                  "host:1234",
 			}),
 			resOrig: labels.FromMap(map[string]string{
-				model.SchemeLabel:      "https",
-				model.MetricsPathLabel: "/metrics",
-				model.JobLabel:         "job",
-				"custom":               "host:1234",
+				model.SchemeLabel:         "https",
+				model.MetricsPathLabel:    "/metrics",
+				model.JobLabel:            "job",
+				model.ScrapeIntervalLabel: "1s",
+				model.ScrapeTimeoutLabel:  "1s",
+				"custom":                  "host:1234",
 			}),
 		},
 		// Invalid UTF-8 in label.
@ -201,14 +235,102 @@ func TestPopulateLabels(t *testing.T) {
 				"custom":           "\xbd",
 			}),
 			cfg: &config.ScrapeConfig{
-				Scheme:      "https",
-				MetricsPath: "/metrics",
-				JobName:     "job",
+				Scheme:         "https",
+				MetricsPath:    "/metrics",
+				JobName:        "job",
+				ScrapeInterval: model.Duration(time.Second),
+				ScrapeTimeout:  model.Duration(time.Second),
 			},
 			res:     nil,
 			resOrig: nil,
 			err:     "invalid label value for \"custom\": \"\\xbd\"",
 		},
+		// Invalid duration in interval label.
+		{
+			in: labels.FromMap(map[string]string{
+				model.AddressLabel:        "1.2.3.4:1000",
+				model.ScrapeIntervalLabel: "2notseconds",
+			}),
+			cfg: &config.ScrapeConfig{
+				Scheme:         "https",
+				MetricsPath:    "/metrics",
+				JobName:        "job",
+				ScrapeInterval: model.Duration(time.Second),
+				ScrapeTimeout:  model.Duration(time.Second),
+			},
+			res:     nil,
+			resOrig: nil,
+			err:     "error parsing scrape interval: not a valid duration string: \"2notseconds\"",
+		},
+		// Invalid duration in timeout label.
+		{
+			in: labels.FromMap(map[string]string{
+				model.AddressLabel:       "1.2.3.4:1000",
+				model.ScrapeTimeoutLabel: "2notseconds",
+			}),
+			cfg: &config.ScrapeConfig{
+				Scheme:         "https",
+				MetricsPath:    "/metrics",
+				JobName:        "job",
+				ScrapeInterval: model.Duration(time.Second),
+				ScrapeTimeout:  model.Duration(time.Second),
+			},
+			res:     nil,
+			resOrig: nil,
+			err:     "error parsing scrape timeout: not a valid duration string: \"2notseconds\"",
+		},
+		// 0 interval in timeout label.
+		{
+			in: labels.FromMap(map[string]string{
+				model.AddressLabel:        "1.2.3.4:1000",
+				model.ScrapeIntervalLabel: "0s",
+			}),
+			cfg: &config.ScrapeConfig{
+				Scheme:         "https",
+				MetricsPath:    "/metrics",
+				JobName:        "job",
+				ScrapeInterval: model.Duration(time.Second),
+				ScrapeTimeout:  model.Duration(time.Second),
+			},
+			res:     nil,
+			resOrig: nil,
+			err:     "scrape interval cannot be 0",
+		},
+		// 0 duration in timeout label.
+		{
+			in: labels.FromMap(map[string]string{
+				model.AddressLabel:       "1.2.3.4:1000",
+				model.ScrapeTimeoutLabel: "0s",
+			}),
+			cfg: &config.ScrapeConfig{
+				Scheme:         "https",
+				MetricsPath:    "/metrics",
+				JobName:        "job",
+				ScrapeInterval: model.Duration(time.Second),
+				ScrapeTimeout:  model.Duration(time.Second),
+			},
+			res:     nil,
+			resOrig: nil,
+			err:     "scrape timeout cannot be 0",
+		},
+		// Timeout less than interval.
+		{
+			in: labels.FromMap(map[string]string{
+				model.AddressLabel:        "1.2.3.4:1000",
+				model.ScrapeIntervalLabel: "1s",
+				model.ScrapeTimeoutLabel:  "2s",
+			}),
+			cfg: &config.ScrapeConfig{
+				Scheme:         "https",
+				MetricsPath:    "/metrics",
+				JobName:        "job",
+				ScrapeInterval: model.Duration(time.Second),
+				ScrapeTimeout:  model.Duration(time.Second),
+			},
+			res:     nil,
+			resOrig: nil,
+			err:     "scrape timeout cannot be greater than scrape interval (\"2s\" > \"1s\")",
+		},
 	}
 	for _, c := range cases {
 		in := c.in.Copy()
@ -276,7 +398,8 @@ scrape_configs:
 		ch = make(chan struct{}, 1)
 	)

-	scrapeManager := NewManager(nil, nil)
+	opts := Options{}
+	scrapeManager := NewManager(&opts, nil, nil)
 	newLoop := func(scrapeLoopOptions) loop {
 		ch <- struct{}{}
 		return noopLoop()
@ -338,7 +461,8 @@ scrape_configs:
 }

 func TestManagerTargetsUpdates(t *testing.T) {
-	m := NewManager(nil, nil)
+	opts := Options{}
+	m := NewManager(&opts, nil, nil)

 	ts := make(chan map[string][]*targetgroup.Group)
 	go m.Run(ts)
@ -390,7 +514,8 @@ global:
 		return cfg
 	}

-	scrapeManager := NewManager(nil, nil)
+	opts := Options{}
+	scrapeManager := NewManager(&opts, nil, nil)

 	// Load the first config.
 	cfg1 := getConfig("ha1")

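The new TestPopulateLabels cases above encode the validation rules for the __scrape_interval__ and __scrape_timeout__ labels. Restated in isolation (an illustrative helper assuming the semantics shown in the expected error strings; not the real populateLabels API):

    package main

    import (
    	"errors"
    	"fmt"

    	"github.com/prometheus/common/model"
    )

    func validateScrapeDurations(interval, timeout string) error {
    	i, err := model.ParseDuration(interval)
    	if err != nil {
    		return fmt.Errorf("error parsing scrape interval: %v", err)
    	}
    	if i == 0 {
    		return errors.New("scrape interval cannot be 0")
    	}
    	to, err := model.ParseDuration(timeout)
    	if err != nil {
    		return fmt.Errorf("error parsing scrape timeout: %v", err)
    	}
    	if to == 0 {
    		return errors.New("scrape timeout cannot be 0")
    	}
    	if to > i {
    		return fmt.Errorf("scrape timeout cannot be greater than scrape interval (%q > %q)", timeout, interval)
    	}
    	return nil
    }

    func main() {
    	fmt.Println(validateScrapeDurations("1s", "2s")) // timeout > interval: rejected
    }
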
@ -49,10 +49,10 @@ import (
 	"github.com/prometheus/prometheus/storage"
 )

-// Temporary tolerance for scrape appends timestamps alignment, to enable better
-// compression at the TSDB level.
+// ScrapeTimestampTolerance is the tolerance for scrape appends timestamps
+// alignment, to enable better compression at the TSDB level.
 // See https://github.com/prometheus/prometheus/issues/7846
-const scrapeTimestampTolerance = 2 * time.Millisecond
+var ScrapeTimestampTolerance = 2 * time.Millisecond

 // AlignScrapeTimestamps enables the tolerance for scrape appends timestamps described above.
 var AlignScrapeTimestamps = true
@ -253,6 +253,8 @@ type scrapeLoopOptions struct {
 	labelLimits     *labelLimits
 	honorLabels     bool
 	honorTimestamps bool
+	interval        time.Duration
+	timeout         time.Duration
 	mrc             []*relabel.Config
 	cache           *scrapeCache
 }
@ -261,7 +263,7 @@ const maxAheadTime = 10 * time.Minute

 type labelsMutator func(labels.Labels) labels.Labels

-func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, jitterSeed uint64, logger log.Logger) (*scrapePool, error) {
+func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, jitterSeed uint64, logger log.Logger, reportScrapeTimeout bool) (*scrapePool, error) {
 	targetScrapePools.Inc()
 	if logger == nil {
 		logger = log.NewNopLogger()
@ -306,7 +308,11 @@ func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, jitterSeed
 			cache,
 			jitterSeed,
 			opts.honorTimestamps,
 			opts.sampleLimit,
 			opts.labelLimits,
+			opts.interval,
+			opts.timeout,
+			reportScrapeTimeout,
 		)
 	}

@ -414,6 +420,7 @@ func (sp *scrapePool) reload(cfg *config.ScrapeConfig) error {
 		} else {
 			cache = newScrapeCache()
 		}
+
 		var (
 			t = sp.activeTargets[fp]
 			s = &targetScraper{Target: t, client: sp.client, timeout: timeout, bodySizeLimit: bodySizeLimit}
@ -426,6 +433,8 @@ func (sp *scrapePool) reload(cfg *config.ScrapeConfig) error {
 				honorTimestamps: honorTimestamps,
 				mrc:             mrc,
 				cache:           cache,
+				interval:        interval,
+				timeout:         timeout,
 			})
 		)
 		wg.Add(1)
@ -435,7 +444,7 @@ func (sp *scrapePool) reload(cfg *config.ScrapeConfig) error {
 			wg.Done()

 			newLoop.setForcedError(forcedErr)
-			newLoop.run(interval, timeout, nil)
+			newLoop.run(nil)
 		}(oldLoop, newLoop)

 		sp.loops[fp] = newLoop
@ -509,6 +518,12 @@ func (sp *scrapePool) sync(targets []*Target) {
 		hash := t.hash()

 		if _, ok := sp.activeTargets[hash]; !ok {
+			// The scrape interval and timeout labels are set to the config's values initially,
+			// so whether changed via relabeling or not, they'll exist and hold the correct values
+			// for every target.
+			var err error
+			interval, timeout, err = t.intervalAndTimeout(interval, timeout)
+
 			s := &targetScraper{Target: t, client: sp.client, timeout: timeout, bodySizeLimit: bodySizeLimit}
 			l := sp.newLoop(scrapeLoopOptions{
 				target: t,
@ -518,7 +533,12 @@ func (sp *scrapePool) sync(targets []*Target) {
 				honorLabels:     honorLabels,
 				honorTimestamps: honorTimestamps,
 				mrc:             mrc,
+				interval:        interval,
+				timeout:         timeout,
 			})
+			if err != nil {
+				l.setForcedError(err)
+			}

 			sp.activeTargets[hash] = t
 			sp.loops[hash] = l
@ -560,7 +580,7 @@ func (sp *scrapePool) sync(targets []*Target) {
 	}
 	for _, l := range uniqueLoops {
 		if l != nil {
-			go l.run(interval, timeout, nil)
+			go l.run(nil)
 		}
 	}
 	// Wait for all potentially stopped scrapers to terminate.
@ -701,7 +721,7 @@ var errBodySizeLimit = errors.New("body size limit exceeded")

 const acceptHeader = `application/openmetrics-text; version=0.0.1,text/plain;version=0.0.4;q=0.5,*/*;q=0.1`

-var userAgentHeader = fmt.Sprintf("Prometheus/%s", version.Version)
+var UserAgent = fmt.Sprintf("Prometheus/%s", version.Version)

 func (s *targetScraper) scrape(ctx context.Context, w io.Writer) (string, error) {
 	if s.req == nil {
@ -711,7 +731,7 @@ func (s *targetScraper) scrape(ctx context.Context, w io.Writer) (string, error)
 		}
 		req.Header.Add("Accept", acceptHeader)
 		req.Header.Add("Accept-Encoding", "gzip")
-		req.Header.Set("User-Agent", userAgentHeader)
+		req.Header.Set("User-Agent", UserAgent)
 		req.Header.Set("X-Prometheus-Scrape-Timeout-Seconds", strconv.FormatFloat(s.timeout.Seconds(), 'f', -1, 64))

 		s.req = req
@ -772,7 +792,7 @@ func (s *targetScraper) scrape(ctx context.Context, w io.Writer) (string, error)

 // A loop can run and be stopped again. It must not be reused after it was stopped.
 type loop interface {
-	run(interval, timeout time.Duration, errc chan<- error)
+	run(errc chan<- error)
 	setForcedError(err error)
 	stop()
 	getCache() *scrapeCache
@ -796,7 +816,10 @@ type scrapeLoop struct {
 	honorTimestamps bool
 	forcedErr       error
 	forcedErrMtx    sync.Mutex
+	sampleLimit     int
 	labelLimits     *labelLimits
+	interval        time.Duration
+	timeout         time.Duration

 	appender            func(ctx context.Context) storage.Appender
 	sampleMutator       labelsMutator
@ -808,6 +831,8 @@ type scrapeLoop struct {
 	stopped chan struct{}

 	disabledEndOfRunStalenessMarkers bool
+
+	reportScrapeTimeout bool
 }

 // scrapeCache tracks mappings of exposed metric strings to label sets and
@ -1064,7 +1089,11 @@ func newScrapeLoop(ctx context.Context,
 	cache *scrapeCache,
 	jitterSeed uint64,
 	honorTimestamps bool,
+	sampleLimit int,
 	labelLimits *labelLimits,
+	interval time.Duration,
+	timeout time.Duration,
+	reportScrapeTimeout bool,
 ) *scrapeLoop {
 	if l == nil {
 		l = log.NewNopLogger()
@ -1087,16 +1116,20 @@ func newScrapeLoop(ctx context.Context,
 		l:                   l,
 		parentCtx:           ctx,
 		honorTimestamps:     honorTimestamps,
+		sampleLimit:         sampleLimit,
 		labelLimits:         labelLimits,
+		interval:            interval,
+		timeout:             timeout,
+		reportScrapeTimeout: reportScrapeTimeout,
 	}
 	sl.ctx, sl.cancel = context.WithCancel(ctx)

 	return sl
 }

-func (sl *scrapeLoop) run(interval, timeout time.Duration, errc chan<- error) {
+func (sl *scrapeLoop) run(errc chan<- error) {
 	select {
-	case <-time.After(sl.scraper.offset(interval, sl.jitterSeed)):
+	case <-time.After(sl.scraper.offset(sl.interval, sl.jitterSeed)):
 		// Continue after a scraping offset.
 	case <-sl.ctx.Done():
 		close(sl.stopped)
@ -1106,7 +1139,7 @@ func (sl *scrapeLoop) run(interval, timeout time.Duration, errc chan<- error) {
 	var last time.Time

 	alignedScrapeTime := time.Now().Round(0)
-	ticker := time.NewTicker(interval)
+	ticker := time.NewTicker(sl.interval)
 	defer ticker.Stop()

 mainLoop:
@ -1126,19 +1159,19 @@ mainLoop:
 		// Calling Round ensures the time used is the wall clock, as otherwise .Sub
 		// and .Add on time.Time behave differently (see time package docs).
 		scrapeTime := time.Now().Round(0)
-		if AlignScrapeTimestamps && interval > 100*scrapeTimestampTolerance {
+		if AlignScrapeTimestamps && sl.interval > 100*ScrapeTimestampTolerance {
 			// For some reason, a tick might have been skipped, in which case we
 			// would call alignedScrapeTime.Add(interval) multiple times.
-			for scrapeTime.Sub(alignedScrapeTime) >= interval {
-				alignedScrapeTime = alignedScrapeTime.Add(interval)
+			for scrapeTime.Sub(alignedScrapeTime) >= sl.interval {
+				alignedScrapeTime = alignedScrapeTime.Add(sl.interval)
 			}
 			// Align the scrape time if we are in the tolerance boundaries.
-			if scrapeTime.Sub(alignedScrapeTime) <= scrapeTimestampTolerance {
+			if scrapeTime.Sub(alignedScrapeTime) <= ScrapeTimestampTolerance {
 				scrapeTime = alignedScrapeTime
 			}
 		}

-		last = sl.scrapeAndReport(interval, timeout, last, scrapeTime, errc)
+		last = sl.scrapeAndReport(sl.interval, sl.timeout, last, scrapeTime, errc)

 		select {
 		case <-sl.parentCtx.Done():
@ -1153,7 +1186,7 @@ mainLoop:
 	close(sl.stopped)

 	if !sl.disabledEndOfRunStalenessMarkers {
-		sl.endOfRunStaleness(last, ticker, interval)
+		sl.endOfRunStaleness(last, ticker, sl.interval)
 	}
 }

@ -1192,7 +1225,7 @@ func (sl *scrapeLoop) scrapeAndReport(interval, timeout time.Duration, last, app
 	}()

 	defer func() {
-		if err = sl.report(app, appendTime, time.Since(start), total, added, seriesAdded, scrapeErr); err != nil {
+		if err = sl.report(app, appendTime, timeout, time.Since(start), total, added, seriesAdded, scrapeErr); err != nil {
 			level.Warn(sl.l).Log("msg", "Appending scrape report failed", "err", err)
 		}
 	}()
@ -1357,6 +1390,7 @@ func (sl *scrapeLoop) append(app storage.Appender, b []byte, contentType string,
 		defTime        = timestamp.FromTime(ts)
 		appErrs        = appendErrors{}
 		sampleLimitErr error
+		e              exemplar.Exemplar // escapes to heap so hoisted out of loop
 	)

 	defer func() {
@ -1373,7 +1407,6 @@ loop:
 		var (
 			et          textparse.Entry
 			sampleAdded bool
-			e           exemplar.Exemplar
 		)
 		if et, err = p.Next(); err != nil {
 			if err == io.EOF {
@ -1480,6 +1513,7 @@ loop:
 				// Since exemplar storage is still experimental, we don't fail the scrape on ingestion errors.
 				level.Debug(sl.l).Log("msg", "Error while adding exemplar in AddExemplar", "exemplar", fmt.Sprintf("%+v", e), "err", exemplarErr)
 			}
+			e = exemplar.Exemplar{} // reset for next time round loop
 		}

 	}
@ -1580,9 +1614,11 @@ const (
 	scrapeSamplesMetricName      = "scrape_samples_scraped" + "\xff"
 	samplesPostRelabelMetricName = "scrape_samples_post_metric_relabeling" + "\xff"
 	scrapeSeriesAddedMetricName  = "scrape_series_added" + "\xff"
+	scrapeTimeoutMetricName      = "scrape_timeout_seconds" + "\xff"
+	scrapeSampleLimitMetricName  = "scrape_sample_limit" + "\xff"
 )

-func (sl *scrapeLoop) report(app storage.Appender, start time.Time, duration time.Duration, scraped, added, seriesAdded int, scrapeErr error) (err error) {
+func (sl *scrapeLoop) report(app storage.Appender, start time.Time, timeout, duration time.Duration, scraped, added, seriesAdded int, scrapeErr error) (err error) {
 	sl.scraper.Report(start, duration, scrapeErr)

 	ts := timestamp.FromTime(start)
@ -1607,6 +1643,14 @@ func (sl *scrapeLoop) report(app storage.Appender, start time.Time, duration tim
 	if err = sl.addReportSample(app, scrapeSeriesAddedMetricName, ts, float64(seriesAdded)); err != nil {
 		return
 	}
+	if sl.reportScrapeTimeout {
+		if err = sl.addReportSample(app, scrapeTimeoutMetricName, ts, timeout.Seconds()); err != nil {
+			return
+		}
+		if err = sl.addReportSample(app, scrapeSampleLimitMetricName, ts, float64(sl.sampleLimit)); err != nil {
+			return
+		}
+	}
 	return
 }

@ -1630,6 +1674,14 @@ func (sl *scrapeLoop) reportStale(app storage.Appender, start time.Time) (err er
 	if err = sl.addReportSample(app, scrapeSeriesAddedMetricName, ts, stale); err != nil {
 		return
 	}
+	if sl.reportScrapeTimeout {
+		if err = sl.addReportSample(app, scrapeTimeoutMetricName, ts, stale); err != nil {
+			return
+		}
+		if err = sl.addReportSample(app, scrapeSampleLimitMetricName, ts, stale); err != nil {
+			return
+		}
+	}
 	return
 }

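Net effect of the scrape.go hunks above: the scrape interval and timeout move from run() parameters onto the scrapeLoop itself, so every caller shrinks to run(errc). A simplified sketch of the resulting loop shape (names and fields condensed; not the real type):

    package main

    import "time"

    type loopSketch struct {
    	interval time.Duration
    	timeout  time.Duration
    	stop     chan struct{}
    }

    func (l *loopSketch) run(errc chan<- error) {
    	ticker := time.NewTicker(l.interval) // previously: time.NewTicker(interval) from the parameter
    	defer ticker.Stop()
    	for {
    		select {
    		case <-ticker.C:
    			// scrapeAndReport(l.interval, l.timeout, ...) would run here.
    		case <-l.stop:
    			return
    		}
    	}
    }

    func main() {
    	l := &loopSketch{interval: time.Second, timeout: 500 * time.Millisecond, stop: make(chan struct{})}
    	go l.run(nil)
    	close(l.stop)
    }
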
@ -57,7 +57,7 @@ func TestNewScrapePool(t *testing.T) {
 	var (
 		app   = &nopAppendable{}
 		cfg   = &config.ScrapeConfig{}
-		sp, _ = newScrapePool(cfg, app, 0, nil)
+		sp, _ = newScrapePool(cfg, app, 0, nil, false)
 	)

 	if a, ok := sp.appendable.(*nopAppendable); !ok || a != app {
@ -92,8 +92,8 @@ func TestDroppedTargetsList(t *testing.T) {
 				},
 			},
 		}
-		sp, _                  = newScrapePool(cfg, app, 0, nil)
-		expectedLabelSetString = "{__address__=\"127.0.0.1:9090\", job=\"dropMe\"}"
+		sp, _                  = newScrapePool(cfg, app, 0, nil, false)
+		expectedLabelSetString = "{__address__=\"127.0.0.1:9090\", __scrape_interval__=\"0s\", __scrape_timeout__=\"0s\", job=\"dropMe\"}"
 		expectedLength         = 1
 	)
 	sp.Sync(tgs)
@ -146,14 +146,16 @@ type testLoop struct {
 	forcedErr    error
 	forcedErrMtx sync.Mutex
 	runOnce      bool
+	interval     time.Duration
+	timeout      time.Duration
 }

-func (l *testLoop) run(interval, timeout time.Duration, errc chan<- error) {
+func (l *testLoop) run(errc chan<- error) {
 	if l.runOnce {
 		panic("loop must be started only once")
 	}
 	l.runOnce = true
-	l.startFunc(interval, timeout, errc)
+	l.startFunc(l.interval, l.timeout, errc)
 }

 func (l *testLoop) disableEndOfRunStalenessMarkers() {
@ -250,7 +252,7 @@ func TestScrapePoolReload(t *testing.T) {
 	// On starting to run, new loops created on reload check whether their preceding
 	// equivalents have been stopped.
 	newLoop := func(opts scrapeLoopOptions) loop {
-		l := &testLoop{}
+		l := &testLoop{interval: time.Duration(reloadCfg.ScrapeInterval), timeout: time.Duration(reloadCfg.ScrapeTimeout)}
 		l.startFunc = func(interval, timeout time.Duration, errc chan<- error) {
 			require.Equal(t, 3*time.Second, interval, "Unexpected scrape interval")
 			require.Equal(t, 2*time.Second, timeout, "Unexpected scrape timeout")
@ -276,8 +278,10 @@ func TestScrapePoolReload(t *testing.T) {
 	// one terminated.

 	for i := 0; i < numTargets; i++ {
+		labels := labels.FromStrings(model.AddressLabel, fmt.Sprintf("example.com:%d", i))
 		t := &Target{
-			labels: labels.FromStrings(model.AddressLabel, fmt.Sprintf("example.com:%d", i)),
+			labels:           labels,
+			discoveredLabels: labels,
 		}
 		l := &testLoop{}
 		l.stopFunc = func() {
@ -342,7 +346,7 @@ func TestScrapePoolTargetLimit(t *testing.T) {
 		activeTargets: map[uint64]*Target{},
 		loops:         map[uint64]loop{},
 		newLoop:       newLoop,
-		logger:        nil,
+		logger:        log.NewNopLogger(),
 		client:        http.DefaultClient,
 	}

@ -452,7 +456,7 @@ func TestScrapePoolTargetLimit(t *testing.T) {
 func TestScrapePoolAppender(t *testing.T) {
 	cfg := &config.ScrapeConfig{}
 	app := &nopAppendable{}
-	sp, _ := newScrapePool(cfg, app, 0, nil)
+	sp, _ := newScrapePool(cfg, app, 0, nil, false)

 	loop := sp.newLoop(scrapeLoopOptions{
 		target: &Target{},
@ -488,12 +492,12 @@ func TestScrapePoolAppender(t *testing.T) {
 }

 func TestScrapePoolRaces(t *testing.T) {
-	interval, _ := model.ParseDuration("500ms")
-	timeout, _ := model.ParseDuration("1s")
+	interval, _ := model.ParseDuration("1s")
+	timeout, _ := model.ParseDuration("500ms")
 	newConfig := func() *config.ScrapeConfig {
 		return &config.ScrapeConfig{ScrapeInterval: interval, ScrapeTimeout: timeout}
 	}
-	sp, _ := newScrapePool(newConfig(), &nopAppendable{}, 0, nil)
+	sp, _ := newScrapePool(newConfig(), &nopAppendable{}, 0, nil, false)
 	tgts := []*targetgroup.Group{
 		{
 			Targets: []model.LabelSet{
@ -582,7 +586,11 @@ func TestScrapeLoopStopBeforeRun(t *testing.T) {
 		nopMutator,
 		nil, nil, 0,
 		true,
+		0,
 		nil,
+		1,
+		0,
+		false,
 	)

 	// The scrape pool synchronizes on stopping scrape loops. However, new scrape
@ -611,7 +619,7 @@ func TestScrapeLoopStopBeforeRun(t *testing.T) {

 	runDone := make(chan struct{})
 	go func() {
-		sl.run(1, 0, nil)
+		sl.run(nil)
 		close(runDone)
 	}()

@ -647,7 +655,11 @@ func TestScrapeLoopStop(t *testing.T) {
 		nil,
 		0,
 		true,
+		0,
 		nil,
+		10*time.Millisecond,
+		time.Hour,
+		false,
 	)

 	// Terminate loop after 2 scrapes.
@ -664,7 +676,7 @@ func TestScrapeLoopStop(t *testing.T) {
 	}

 	go func() {
-		sl.run(10*time.Millisecond, time.Hour, nil)
+		sl.run(nil)
 		signal <- struct{}{}
 	}()

@ -715,7 +727,11 @@ func TestScrapeLoopRun(t *testing.T) {
 		nil,
 		0,
 		true,
+		0,
 		nil,
+		time.Second,
+		time.Hour,
+		false,
 	)

 	// The loop must terminate during the initial offset if the context
@ -723,7 +739,7 @@ func TestScrapeLoopRun(t *testing.T) {
 	scraper.offsetDur = time.Hour

 	go func() {
-		sl.run(time.Second, time.Hour, errc)
+		sl.run(errc)
 		signal <- struct{}{}
 	}()

@ -763,11 +779,15 @@ func TestScrapeLoopRun(t *testing.T) {
 		nil,
 		0,
 		true,
+		0,
 		nil,
+		time.Second,
+		100*time.Millisecond,
+		false,
 	)

 	go func() {
-		sl.run(time.Second, 100*time.Millisecond, errc)
+		sl.run(errc)
 		signal <- struct{}{}
 	}()

@ -815,7 +835,11 @@ func TestScrapeLoopForcedErr(t *testing.T) {
 		nil,
 		0,
 		true,
+		0,
 		nil,
+		time.Second,
+		time.Hour,
+		false,
 	)

 	forcedErr := fmt.Errorf("forced err")
@ -827,7 +851,7 @@ func TestScrapeLoopForcedErr(t *testing.T) {
 	}

 	go func() {
-		sl.run(time.Second, time.Hour, errc)
+		sl.run(errc)
 		signal <- struct{}{}
 	}()

@ -866,7 +890,11 @@ func TestScrapeLoopMetadata(t *testing.T) {
 		cache,
 		0,
 		true,
+		0,
 		nil,
+		0,
+		0,
+		false,
 	)
 	defer cancel()

@ -901,10 +929,10 @@ test_metric 1
 	require.Equal(t, "", md.Unit)
 }

-func TestScrapeLoopSeriesAdded(t *testing.T) {
+func simpleTestScrapeLoop(t testing.TB) (context.Context, *scrapeLoop) {
 	// Need a full storage for correct Add/AddFast semantics.
 	s := teststorage.New(t)
-	defer s.Close()
+	t.Cleanup(func() { s.Close() })

 	ctx, cancel := context.WithCancel(context.Background())
 	sl := newScrapeLoop(ctx,
@ -916,9 +944,19 @@ func TestScrapeLoopSeriesAdded(t *testing.T) {
 		nil,
 		0,
 		true,
+		0,
 		nil,
+		0,
+		0,
+		false,
 	)
-	defer cancel()
+	t.Cleanup(func() { cancel() })
+
+	return ctx, sl
+}
+
+func TestScrapeLoopSeriesAdded(t *testing.T) {
+	ctx, sl := simpleTestScrapeLoop(t)

 	slApp := sl.appender(ctx)
 	total, added, seriesAdded, err := sl.append(slApp, []byte("test_metric 1\n"), "", time.Time{})
@ -937,6 +975,46 @@ func TestScrapeLoopSeriesAdded(t *testing.T) {
 	require.Equal(t, 0, seriesAdded)
 }

+func makeTestMetrics(n int) []byte {
+	// Construct a metrics string to parse
+	sb := bytes.Buffer{}
+	for i := 0; i < n; i++ {
+		fmt.Fprintf(&sb, "# TYPE metric_a gauge\n")
+		fmt.Fprintf(&sb, "# HELP metric_a help text\n")
+		fmt.Fprintf(&sb, "metric_a{foo=\"%d\",bar=\"%d\"} 1\n", i, i*100)
+	}
+	return sb.Bytes()
+}
+
+func BenchmarkScrapeLoopAppend(b *testing.B) {
+	ctx, sl := simpleTestScrapeLoop(b)
+
+	slApp := sl.appender(ctx)
+	metrics := makeTestMetrics(100)
+	ts := time.Time{}
+
+	b.ResetTimer()
+
+	for i := 0; i < b.N; i++ {
+		ts = ts.Add(time.Second)
+		_, _, _, _ = sl.append(slApp, metrics, "", ts)
+	}
+}
+func BenchmarkScrapeLoopAppendOM(b *testing.B) {
+	ctx, sl := simpleTestScrapeLoop(b)
+
+	slApp := sl.appender(ctx)
+	metrics := makeTestMetrics(100)
+	ts := time.Time{}
+
+	b.ResetTimer()
+
+	for i := 0; i < b.N; i++ {
+		ts = ts.Add(time.Second)
+		_, _, _, _ = sl.append(slApp, metrics, "application/openmetrics-text", ts)
+	}
+}
+
 func TestScrapeLoopRunCreatesStaleMarkersOnFailedScrape(t *testing.T) {
 	appender := &collectResultAppender{}
 	var (
@ -955,7 +1033,11 @@ func TestScrapeLoopRunCreatesStaleMarkersOnFailedScrape(t *testing.T) {
 		nil,
 		0,
 		true,
+		0,
 		nil,
+		10*time.Millisecond,
+		time.Hour,
+		false,
 	)
 	// Succeed once, several failures, then stop.
 	numScrapes := 0
@ -973,7 +1055,7 @@ func TestScrapeLoopRunCreatesStaleMarkersOnFailedScrape(t *testing.T) {
 	}

 	go func() {
-		sl.run(10*time.Millisecond, time.Hour, nil)
+		sl.run(nil)
 		signal <- struct{}{}
 	}()

@ -1010,7 +1092,11 @@ func TestScrapeLoopRunCreatesStaleMarkersOnParseFailure(t *testing.T) {
 		nil,
 		0,
 		true,
+		0,
 		nil,
+		10*time.Millisecond,
+		time.Hour,
+		false,
 	)

 	// Succeed once, several failures, then stop.
@ -1030,7 +1116,7 @@ func TestScrapeLoopRunCreatesStaleMarkersOnParseFailure(t *testing.T) {
 	}

 	go func() {
-		sl.run(10*time.Millisecond, time.Hour, nil)
+		sl.run(nil)
 		signal <- struct{}{}
 	}()

@ -1069,7 +1155,11 @@ func TestScrapeLoopCache(t *testing.T) {
 		nil,
 		0,
 		true,
+		0,
 		nil,
+		10*time.Millisecond,
+		time.Hour,
+		false,
 	)

 	numScrapes := 0
@ -1106,7 +1196,7 @@ func TestScrapeLoopCache(t *testing.T) {
 	}

 	go func() {
-		sl.run(10*time.Millisecond, time.Hour, nil)
+		sl.run(nil)
 		signal <- struct{}{}
 	}()

@ -1144,7 +1234,11 @@ func TestScrapeLoopCacheMemoryExhaustionProtection(t *testing.T) {
 		nil,
 		0,
 		true,
+		0,
 		nil,
+		10*time.Millisecond,
+		time.Hour,
+		false,
 	)

 	numScrapes := 0
@ -1164,7 +1258,7 @@ func TestScrapeLoopCacheMemoryExhaustionProtection(t *testing.T) {
 	}

 	go func() {
-		sl.run(10*time.Millisecond, time.Hour, nil)
+		sl.run(nil)
 		signal <- struct{}{}
 	}()

@ -1251,7 +1345,11 @@ func TestScrapeLoopAppend(t *testing.T) {
 			nil,
 			0,
 			true,
+			0,
 			nil,
+			0,
+			0,
+			false,
 		)

 		now := time.Now()
@ -1293,7 +1391,11 @@ func TestScrapeLoopAppendCacheEntryButErrNotFound(t *testing.T) {
 		nil,
 		0,
 		true,
+		0,
 		nil,
+		0,
+		0,
+		false,
 	)

 	fakeRef := uint64(1)
@ -1343,7 +1445,11 @@ func TestScrapeLoopAppendSampleLimit(t *testing.T) {
 		nil,
 		0,
 		true,
+		app.limit,
 		nil,
+		0,
+		0,
+		false,
 	)

 	// Get the value of the Counter before performing the append.
@ -1413,7 +1519,11 @@ func TestScrapeLoop_ChangingMetricString(t *testing.T) {
 		nil,
 		0,
 		true,
+		0,
 		nil,
+		0,
+		0,
+		false,
 	)

 	now := time.Now()
@ -1454,7 +1564,11 @@ func TestScrapeLoopAppendStaleness(t *testing.T) {
 		nil,
 		0,
 		true,
+		0,
 		nil,
+		0,
+		0,
+		false,
 	)

 	now := time.Now()
@ -1498,7 +1612,11 @@ func TestScrapeLoopAppendNoStalenessIfTimestamp(t *testing.T) {
 		nil,
 		0,
 		true,
+		0,
 		nil,
+		0,
+		0,
+		false,
 	)

 	now := time.Now()
@ -1600,7 +1718,11 @@ metric_total{n="2"} 2 # {t="2"} 2.0 20000
 		nil,
 		0,
 		true,
+		0,
 		nil,
+		0,
+		0,
+		false,
 	)

 	now := time.Now()
@ -1658,7 +1780,11 @@ func TestScrapeLoopAppendExemplarSeries(t *testing.T) {
 		nil,
 		0,
 		true,
+		0,
 		nil,
+		0,
+		0,
+		false,
 	)

 	now := time.Now()
@ -1703,7 +1829,11 @@ func TestScrapeLoopRunReportsTargetDownOnScrapeError(t *testing.T) {
 		nil,
 		0,
 		true,
+		0,
 		nil,
+		10*time.Millisecond,
+		time.Hour,
+		false,
 	)

 	scraper.scrapeFunc = func(ctx context.Context, w io.Writer) error {
@ -1711,7 +1841,7 @@ func TestScrapeLoopRunReportsTargetDownOnScrapeError(t *testing.T) {
 		return errors.New("scrape failed")
 	}

-	sl.run(10*time.Millisecond, time.Hour, nil)
+	sl.run(nil)
 	require.Equal(t, 0.0, appender.result[0].v, "bad 'up' value")
 }

@ -1732,7 +1862,11 @@ func TestScrapeLoopRunReportsTargetDownOnInvalidUTF8(t *testing.T) {
 		nil,
 		0,
 		true,
+		0,
 		nil,
+		10*time.Millisecond,
+		time.Hour,
+		false,
 	)

 	scraper.scrapeFunc = func(ctx context.Context, w io.Writer) error {
@ -1741,7 +1875,7 @@ func TestScrapeLoopRunReportsTargetDownOnInvalidUTF8(t *testing.T) {
 		return nil
 	}

-	sl.run(10*time.Millisecond, time.Hour, nil)
+	sl.run(nil)
 	require.Equal(t, 0.0, appender.result[0].v, "bad 'up' value")
 }

@ -1774,7 +1908,11 @@ func TestScrapeLoopAppendGracefullyIfAmendOrOutOfOrderOrOutOfBounds(t *testing.T
 		nil,
 		0,
 		true,
+		0,
 		nil,
+		0,
+		0,
+		false,
 	)

 	now := time.Unix(1, 0)
@ -1812,7 +1950,11 @@ func TestScrapeLoopOutOfBoundsTimeError(t *testing.T) {
 		nil,
 		0,
 		true,
+		0,
 		nil,
+		0,
+		0,
+		false,
 	)

 	now := time.Now().Add(20 * time.Minute)
@ -2063,7 +2205,11 @@ func TestScrapeLoop_RespectTimestamps(t *testing.T) {
 		func(ctx context.Context) storage.Appender { return capp },
 		nil, 0,
 		true,
+		0,
 		nil,
+		0,
+		0,
+		false,
 	)

 	now := time.Now()
@ -2097,7 +2243,11 @@ func TestScrapeLoop_DiscardTimestamps(t *testing.T) {
 		func(ctx context.Context) storage.Appender { return capp },
 		nil, 0,
 		false,
+		0,
 		nil,
+		0,
+		0,
+		false,
 	)

 	now := time.Now()
@ -2130,7 +2280,11 @@ func TestScrapeLoopDiscardDuplicateLabels(t *testing.T) {
 		nil,
 		0,
 		true,
+		0,
 		nil,
+		0,
+		0,
+		false,
 	)
 	defer cancel()

@ -2181,7 +2335,11 @@ func TestScrapeLoopDiscardUnnamedMetrics(t *testing.T) {
 		nil,
 		0,
 		true,
+		0,
 		nil,
+		0,
+		0,
+		false,
 	)
 	defer cancel()

@ -2274,7 +2432,7 @@ func TestReuseScrapeCache(t *testing.T) {
 			ScrapeInterval: model.Duration(5 * time.Second),
 			MetricsPath:    "/metrics",
 		}
-		sp, _ = newScrapePool(cfg, app, 0, nil)
+		sp, _ = newScrapePool(cfg, app, 0, nil, false)
 		t1    = &Target{
 			discoveredLabels: labels.Labels{
 				labels.Label{
@ -2399,7 +2557,11 @@ func TestScrapeAddFast(t *testing.T) {
 		nil,
 		0,
 		true,
+		0,
 		nil,
+		0,
+		0,
+		false,
 	)
 	defer cancel()

@ -2429,7 +2591,7 @@ func TestReuseCacheRace(t *testing.T) {
 			ScrapeInterval: model.Duration(5 * time.Second),
 			MetricsPath:    "/metrics",
 		}
-		sp, _ = newScrapePool(cfg, app, 0, nil)
+		sp, _ = newScrapePool(cfg, app, 0, nil, false)
 		t1    = &Target{
 			discoveredLabels: labels.Labels{
 				labels.Label{
@ -2483,7 +2645,11 @@ func TestScrapeReportSingleAppender(t *testing.T) {
 		nil,
 		0,
 		true,
+		0,
 		nil,
+		10*time.Millisecond,
+		time.Hour,
+		false,
 	)

 	numScrapes := 0
@ -2498,7 +2664,7 @@ func TestScrapeReportSingleAppender(t *testing.T) {
 	}

 	go func() {
-		sl.run(10*time.Millisecond, time.Hour, nil)
+		sl.run(nil)
 		signal <- struct{}{}
 	}()

@ -2612,7 +2778,11 @@ func TestScrapeLoopLabelLimit(t *testing.T) {
 			nil,
 			0,
 			true,
+			0,
 			&test.labelLimits,
+			0,
+			0,
+			false,
 		)

 		slApp := sl.appender(context.Background())
@ -2627,3 +2797,40 @@ func TestScrapeLoopLabelLimit(t *testing.T) {
 		}
 	}
 }
+
+func TestTargetScrapeIntervalAndTimeoutRelabel(t *testing.T) {
+	interval, _ := model.ParseDuration("2s")
+	timeout, _ := model.ParseDuration("500ms")
+	config := &config.ScrapeConfig{
+		ScrapeInterval: interval,
+		ScrapeTimeout:  timeout,
+		RelabelConfigs: []*relabel.Config{
+			{
+				SourceLabels: model.LabelNames{model.ScrapeIntervalLabel},
+				Regex:        relabel.MustNewRegexp("2s"),
+				Replacement:  "3s",
+				TargetLabel:  model.ScrapeIntervalLabel,
+				Action:       relabel.Replace,
+			},
+			{
+				SourceLabels: model.LabelNames{model.ScrapeTimeoutLabel},
+				Regex:        relabel.MustNewRegexp("500ms"),
+				Replacement:  "750ms",
+				TargetLabel:  model.ScrapeTimeoutLabel,
+				Action:       relabel.Replace,
+			},
+		},
+	}
+	sp, _ := newScrapePool(config, &nopAppendable{}, 0, nil, false)
+	tgts := []*targetgroup.Group{
+		{
+			Targets: []model.LabelSet{{model.AddressLabel: "127.0.0.1:9090"}},
+		},
+	}
+
+	sp.Sync(tgts)
+	defer sp.stop()
+
+	require.Equal(t, "3s", sp.ActiveTargets()[0].labels.Get(model.ScrapeIntervalLabel))
+	require.Equal(t, "750ms", sp.ActiveTargets()[0].labels.Get(model.ScrapeTimeoutLabel))
+}

@ -143,8 +143,18 @@ func (t *Target) SetMetadataStore(s MetricMetadataStore) {
 // hash returns an identifying hash for the target.
 func (t *Target) hash() uint64 {
 	h := fnv.New64a()
+
+	// We must build a label set without the scrape interval and timeout
+	// labels because those aren't defining attributes of a target
+	// and can be changed without qualifying its parent as a new target,
+	// therefore they should not affect its unique hash.
+	l := t.labels.Map()
+	delete(l, model.ScrapeIntervalLabel)
+	delete(l, model.ScrapeTimeoutLabel)
+	lset := labels.FromMap(l)
+
 	//nolint: errcheck
-	h.Write([]byte(fmt.Sprintf("%016d", t.labels.Hash())))
+	h.Write([]byte(fmt.Sprintf("%016d", lset.Hash())))
 	//nolint: errcheck
 	h.Write([]byte(t.URL().String()))

@ -273,6 +283,31 @@ func (t *Target) Health() TargetHealth {
 	return t.health
 }

+// intervalAndTimeout returns the interval and timeout derived from
+// the target's labels.
+func (t *Target) intervalAndTimeout(defaultInterval, defaultDuration time.Duration) (time.Duration, time.Duration, error) {
+	t.mtx.RLock()
+	defer t.mtx.RUnlock()
+
+	intervalLabel := t.labels.Get(model.ScrapeIntervalLabel)
+	interval, err := model.ParseDuration(intervalLabel)
+	if err != nil {
+		return defaultInterval, defaultDuration, errors.Errorf("Error parsing interval label %q: %v", intervalLabel, err)
+	}
+	timeoutLabel := t.labels.Get(model.ScrapeTimeoutLabel)
+	timeout, err := model.ParseDuration(timeoutLabel)
+	if err != nil {
+		return defaultInterval, defaultDuration, errors.Errorf("Error parsing timeout label %q: %v", timeoutLabel, err)
+	}
+
+	return time.Duration(interval), time.Duration(timeout), nil
+}
+
 // GetValue gets a label value from the entire label set.
 func (t *Target) GetValue(name string) string {
 	return t.labels.Get(name)
 }

 // Targets is a sortable list of targets.
 type Targets []*Target

@ -329,6 +364,8 @@ func populateLabels(lset labels.Labels, cfg *config.ScrapeConfig) (res, orig lab
 	// Copy labels into the labelset for the target if they are not set already.
 	scrapeLabels := []labels.Label{
 		{Name: model.JobLabel, Value: cfg.JobName},
+		{Name: model.ScrapeIntervalLabel, Value: cfg.ScrapeInterval.String()},
+		{Name: model.ScrapeTimeoutLabel, Value: cfg.ScrapeTimeout.String()},
 		{Name: model.MetricsPathLabel, Value: cfg.MetricsPath},
 		{Name: model.SchemeLabel, Value: cfg.Scheme},
 	}
@ -390,6 +427,34 @@ func populateLabels(lset labels.Labels, cfg *config.ScrapeConfig) (res, orig lab
 		return nil, nil, err
 	}

+	var interval string
+	var intervalDuration model.Duration
+	if interval = lset.Get(model.ScrapeIntervalLabel); interval != cfg.ScrapeInterval.String() {
+		intervalDuration, err = model.ParseDuration(interval)
+		if err != nil {
+			return nil, nil, errors.Errorf("error parsing scrape interval: %v", err)
+		}
+		if time.Duration(intervalDuration) == 0 {
+			return nil, nil, errors.New("scrape interval cannot be 0")
+		}
+	}
+
+	var timeout string
+	var timeoutDuration model.Duration
+	if timeout = lset.Get(model.ScrapeTimeoutLabel); timeout != cfg.ScrapeTimeout.String() {
+		timeoutDuration, err = model.ParseDuration(timeout)
+		if err != nil {
+			return nil, nil, errors.Errorf("error parsing scrape timeout: %v", err)
+		}
+		if time.Duration(timeoutDuration) == 0 {
+			return nil, nil, errors.New("scrape timeout cannot be 0")
+		}
+	}
+
+	if timeoutDuration > intervalDuration {
+		return nil, nil, errors.Errorf("scrape timeout cannot be greater than scrape interval (%q > %q)", timeout, interval)
+	}
+
 	// Meta labels are deleted after relabelling. Other internal labels propagate to
 	// the target which decides whether they will be part of their label set.
 	for _, l := range lset {

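The motivation for the hash() change above, shown end to end: two targets that differ only in their scrape interval/timeout labels should keep the same identity. A standalone sketch using the same label helpers (identityHash is illustrative, not the real method):

    package main

    import (
    	"fmt"

    	"github.com/prometheus/common/model"
    	"github.com/prometheus/prometheus/pkg/labels"
    )

    func identityHash(lset labels.Labels) uint64 {
    	m := lset.Map()
    	delete(m, model.ScrapeIntervalLabel)
    	delete(m, model.ScrapeTimeoutLabel)
    	return labels.FromMap(m).Hash()
    }

    func main() {
    	a := labels.FromStrings(model.AddressLabel, "localhost:9090", model.ScrapeIntervalLabel, "15s")
    	b := labels.FromStrings(model.AddressLabel, "localhost:9090", model.ScrapeIntervalLabel, "30s")
    	fmt.Println(identityHash(a) == identityHash(b)) // true
    }
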
@ -382,3 +382,29 @@ func TestTargetsFromGroup(t *testing.T) {
 		t.Fatalf("Expected error %s, got %s", expectedError, failures[0])
 	}
 }
+
+func TestTargetHash(t *testing.T) {
+	target1 := &Target{
+		labels: labels.Labels{
+			{Name: model.AddressLabel, Value: "localhost"},
+			{Name: model.SchemeLabel, Value: "http"},
+			{Name: model.MetricsPathLabel, Value: "/metrics"},
+			{Name: model.ScrapeIntervalLabel, Value: "15s"},
+			{Name: model.ScrapeTimeoutLabel, Value: "500ms"},
+		},
+	}
+	hash1 := target1.hash()
+
+	target2 := &Target{
+		labels: labels.Labels{
+			{Name: model.AddressLabel, Value: "localhost"},
+			{Name: model.SchemeLabel, Value: "http"},
+			{Name: model.MetricsPathLabel, Value: "/metrics"},
+			{Name: model.ScrapeIntervalLabel, Value: "14s"},
+			{Name: model.ScrapeTimeoutLabel, Value: "600ms"},
+		},
+	}
+	hash2 := target2.hash()
+
+	require.Equal(t, hash1, hash2, "Scrape interval and timeout labels should not affect the hash.")
+}

@ -15,7 +15,7 @@ if ! [[ $(protoc --version) =~ "3.15.8" ]]; then
  exit 255
fi

# Since we run go get, go mod download, the go.sum will change.
# Since we run go install, go mod download, the go.sum will change.
# Make a backup.
cp go.sum go.sum.bak
@ -37,7 +37,7 @@ if [ -z "${GITHUB_TOKEN}" ]; then
fi

# List of files that should be synced.
SYNC_FILES="CODE_OF_CONDUCT.md LICENSE Makefile.common SECURITY.md .yamllint"
SYNC_FILES="CODE_OF_CONDUCT.md LICENSE Makefile.common SECURITY.md .yamllint .github/workflows/golangci-lint.yml"

# Go to the root of the repo
cd "$(git rev-parse --show-cdup)" || exit 1

@ -96,6 +96,15 @@ check_license() {
  echo "$1" | grep --quiet --no-messages --ignore-case 'Apache License'
}

check_go() {
  local org_repo
  local default_branch
  org_repo="$1"
  default_branch="$2"

  curl -sLf -o /dev/null "https://raw.githubusercontent.com/${org_repo}/${default_branch}/go.mod"
}

check_circleci_orb() {
  local org_repo
  local default_branch

@ -136,10 +145,14 @@ process_repo() {
      echo "LICENSE in ${org_repo} is not apache, skipping."
      continue
    fi
    if [[ "${source_file}" == '.github/workflows/golangci-lint.yml' ]] && ! check_go "${org_repo}" "${default_branch}"; then
      echo "${org_repo} is not Go, skipping .github/workflows/golangci-lint.yml."
      continue
    fi
    if [[ -z "${target_file}" ]]; then
      echo "${source_file} doesn't exist in ${org_repo}"
      case "${source_file}" in
        CODE_OF_CONDUCT.md | SECURITY.md)
        CODE_OF_CONDUCT.md | SECURITY.md | .github/workflows/golangci-lint.yml)
          echo "${source_file} missing in ${org_repo}, force updating."
          needs_update+=("${source_file}")
          ;;

@ -172,6 +185,9 @@ process_repo() {
  cd "${tmp_dir}/${org_repo}" || return 1
  git checkout -b "${branch}" || return 1

  # If we need to add an Actions file this directory needs to be present.
  mkdir -p "./.github/workflows"

  # Update the files in target repo by one from prometheus/prometheus.
  for source_file in "${needs_update[@]}"; do
    case "${source_file}" in
@ -11,6 +11,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build tools
// +build tools

// Package tools tracks dependencies for tools that are required to generate the protobuf code.
@ -33,9 +33,9 @@ import (
	"github.com/prometheus/client_golang/prometheus"
	config_util "github.com/prometheus/common/config"
	"github.com/prometheus/common/model"
	"github.com/prometheus/common/sigv4"
	"github.com/prometheus/common/version"

	"github.com/prometheus/prometheus/config"
	"github.com/prometheus/prometheus/prompb"
)

@ -97,7 +97,7 @@ type ClientConfig struct {
	URL              *config_util.URL
	Timeout          model.Duration
	HTTPClientConfig config_util.HTTPClientConfig
	SigV4Config      *config.SigV4Config
	SigV4Config      *sigv4.SigV4Config
	Headers          map[string]string
	RetryOnRateLimit bool
}

@ -143,7 +143,7 @@ func NewWriteClient(name string, conf *ClientConfig) (WriteClient, error) {
	t := httpClient.Transport

	if conf.SigV4Config != nil {
		t, err = newSigV4RoundTripper(conf.SigV4Config, httpClient.Transport)
		t, err = sigv4.NewSigV4RoundTripper(conf.SigV4Config, httpClient.Transport)
		if err != nil {
			return nil, err
		}
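With the signer moved into prometheus/common, other clients can reuse it. A minimal sketch, assuming only the sigv4 API visible in this hunk (SigV4Config, NewSigV4RoundTripper); the region is a placeholder and credentials come from the default AWS chain:

```go
package main

import (
	"net/http"

	"github.com/prometheus/common/sigv4"
)

func main() {
	cfg := &sigv4.SigV4Config{Region: "us-east-2"} // placeholder region
	rt, err := sigv4.NewSigV4RoundTripper(cfg, http.DefaultTransport)
	if err != nil {
		panic(err)
	}
	// Every request sent through this client is now SigV4-signed.
	client := &http.Client{Transport: rt}
	_ = client
}
```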
@ -111,9 +111,8 @@ func TestClientRetryAfter(t *testing.T) {

	c := getClient(conf)
	err = c.Store(context.Background(), []byte{})
	if _, ok := err.(RecoverableError); ok {
		t.Fatal("recoverable error not expected")
	}
	_, ok := err.(RecoverableError)
	require.False(t, ok, "Recoverable error not expected.")

	conf = &ClientConfig{
		URL: &config_util.URL{URL: serverURL},

@ -123,9 +122,8 @@ func TestClientRetryAfter(t *testing.T) {

	c = getClient(conf)
	err = c.Store(context.Background(), []byte{})
	if _, ok := err.(RecoverableError); !ok {
		t.Fatal("recoverable error was expected")
	}
	_, ok = err.(RecoverableError)
	require.True(t, ok, "Recoverable error was expected.")
}

func TestRetryAfterDuration(t *testing.T) {
@ -78,9 +78,7 @@ func TestSampledReadEndpoint(t *testing.T) {
	recorder := httptest.NewRecorder()
	h.ServeHTTP(recorder, request)

	if recorder.Code/100 != 2 {
		t.Fatal(recorder.Code)
	}
	require.Equal(t, 2, recorder.Code/100)

	require.Equal(t, "application/x-protobuf", recorder.Result().Header.Get("Content-Type"))
	require.Equal(t, "snappy", recorder.Result().Header.Get("Content-Encoding"))

@ -96,9 +94,7 @@ func TestSampledReadEndpoint(t *testing.T) {
	err = proto.Unmarshal(uncompressed, &resp)
	require.NoError(t, err)

	if len(resp.Results) != 1 {
		t.Fatalf("Expected 1 result, got %d", len(resp.Results))
	}
	require.Equal(t, 1, len(resp.Results), "Expected 1 result.")

	require.Equal(t, &prompb.QueryResult{
		Timeseries: []*prompb.TimeSeries{

@ -189,9 +185,7 @@ func TestStreamReadEndpoint(t *testing.T) {
	recorder := httptest.NewRecorder()
	api.ServeHTTP(recorder, request)

	if recorder.Code/100 != 2 {
		t.Fatal(recorder.Code)
	}
	require.Equal(t, 2, recorder.Code/100)

	require.Equal(t, "application/x-streamed-protobuf; proto=prometheus.ChunkedReadResponse", recorder.Result().Header.Get("Content-Type"))
	require.Equal(t, "", recorder.Result().Header.Get("Content-Encoding"))

@ -208,9 +202,7 @@ func TestStreamReadEndpoint(t *testing.T) {
		results = append(results, res)
	}

	if len(results) != 5 {
		t.Fatalf("Expected 5 result, got %d", len(results))
	}
	require.Equal(t, 5, len(results), "Expected 5 results.")

	require.Equal(t, []*prompb.ChunkedReadResponse{
		{
@ -1,138 +0,0 @@
// Copyright 2021 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package remote

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
	"net/textproto"
	"sync"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/credentials/stscreds"
	"github.com/aws/aws-sdk-go/aws/session"
	signer "github.com/aws/aws-sdk-go/aws/signer/v4"
	"github.com/prometheus/prometheus/config"
)

var sigv4HeaderDenylist = []string{
	"uber-trace-id",
}

type sigV4RoundTripper struct {
	region string
	next   http.RoundTripper
	pool   sync.Pool

	signer *signer.Signer
}

// newSigV4RoundTripper returns a new http.RoundTripper that will sign requests
// using Amazon's Signature Verification V4 signing procedure. The request will
// then be handed off to the next RoundTripper provided by next. If next is nil,
// http.DefaultTransport will be used.
//
// Credentials for signing are retrieved using the default AWS credential
// chain. If credentials cannot be found, an error will be returned.
func newSigV4RoundTripper(cfg *config.SigV4Config, next http.RoundTripper) (http.RoundTripper, error) {
	if next == nil {
		next = http.DefaultTransport
	}

	creds := credentials.NewStaticCredentials(cfg.AccessKey, string(cfg.SecretKey), "")
	if cfg.AccessKey == "" && cfg.SecretKey == "" {
		creds = nil
	}

	sess, err := session.NewSessionWithOptions(session.Options{
		Config: aws.Config{
			Region:      aws.String(cfg.Region),
			Credentials: creds,
		},
		Profile: cfg.Profile,
	})
	if err != nil {
		return nil, fmt.Errorf("could not create new AWS session: %w", err)
	}
	if _, err := sess.Config.Credentials.Get(); err != nil {
		return nil, fmt.Errorf("could not get SigV4 credentials: %w", err)
	}
	if aws.StringValue(sess.Config.Region) == "" {
		return nil, fmt.Errorf("region not configured in sigv4 or in default credentials chain")
	}

	signerCreds := sess.Config.Credentials
	if cfg.RoleARN != "" {
		signerCreds = stscreds.NewCredentials(sess, cfg.RoleARN)
	}

	rt := &sigV4RoundTripper{
		region: cfg.Region,
		next:   next,
		signer: signer.NewSigner(signerCreds),
	}
	rt.pool.New = rt.newBuf
	return rt, nil
}

func (rt *sigV4RoundTripper) newBuf() interface{} {
	return bytes.NewBuffer(make([]byte, 0, 1024))
}

func (rt *sigV4RoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
	// rt.signer.Sign needs a seekable body, so we replace the body with a
	// buffered reader filled with the contents of the original body.
	buf := rt.pool.Get().(*bytes.Buffer)
	defer func() {
		buf.Reset()
		rt.pool.Put(buf)
	}()
	if _, err := io.Copy(buf, req.Body); err != nil {
		return nil, err
	}
	// Close the original body since we don't need it anymore.
	_ = req.Body.Close()

	// Ensure our seeker is back at the start of the buffer once we return.
	var seeker io.ReadSeeker = bytes.NewReader(buf.Bytes())
	defer func() {
		_, _ = seeker.Seek(0, io.SeekStart)
	}()
	req.Body = ioutil.NopCloser(seeker)

	// Clone the request and trim out headers that we don't want to sign.
	signReq := req.Clone(req.Context())
	for _, header := range sigv4HeaderDenylist {
		signReq.Header.Del(header)
	}

	headers, err := rt.signer.Sign(signReq, seeker, "aps", rt.region, time.Now().UTC())
	if err != nil {
		return nil, fmt.Errorf("failed to sign request: %w", err)
	}

	// Copy over signed headers. The Authorization header is not returned by
	// rt.signer.Sign and needs to be copied separately.
	for k, v := range headers {
		req.Header[textproto.CanonicalMIMEHeaderKey(k)] = v
	}
	req.Header.Set("Authorization", signReq.Header.Get("Authorization"))

	return rt.next.RoundTrip(req)
}
@ -1,92 +0,0 @@
// Copyright 2021 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package remote

import (
	"net/http"
	"os"
	"strings"
	"testing"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/session"
	signer "github.com/aws/aws-sdk-go/aws/signer/v4"
	"github.com/prometheus/client_golang/prometheus/promhttp"
	"github.com/stretchr/testify/require"
)

func TestSigV4_Inferred_Region(t *testing.T) {
	os.Setenv("AWS_ACCESS_KEY_ID", "secret")
	os.Setenv("AWS_SECRET_ACCESS_KEY", "token")
	os.Setenv("AWS_REGION", "us-west-2")

	sess, err := session.NewSession(&aws.Config{
		// Setting to an empty string to demonstrate that the default value from
		// the yaml won't override the environment's region.
		Region: aws.String(""),
	})
	require.NoError(t, err)
	_, err = sess.Config.Credentials.Get()
	require.NoError(t, err)

	require.NotNil(t, sess.Config.Region)
	require.Equal(t, "us-west-2", *sess.Config.Region)
}

func TestSigV4RoundTripper(t *testing.T) {
	var gotReq *http.Request

	rt := &sigV4RoundTripper{
		region: "us-east-2",
		next: promhttp.RoundTripperFunc(func(req *http.Request) (*http.Response, error) {
			gotReq = req
			return &http.Response{StatusCode: http.StatusOK}, nil
		}),
		signer: signer.NewSigner(credentials.NewStaticCredentials(
			"test-id",
			"secret",
			"token",
		)),
	}
	rt.pool.New = rt.newBuf

	cli := &http.Client{Transport: rt}

	req, err := http.NewRequest(http.MethodPost, "google.com", strings.NewReader("Hello, world!"))
	require.NoError(t, err)

	_, err = cli.Do(req)
	require.NoError(t, err)
	require.NotNil(t, gotReq)

	origReq := gotReq
	require.NotEmpty(t, origReq.Header.Get("Authorization"))
	require.NotEmpty(t, origReq.Header.Get("X-Amz-Date"))

	// Perform the same request but with a header that shouldn't be included in
	// the signature; validate that the Authorization signature matches.
	t.Run("Ignored Headers", func(t *testing.T) {
		req, err := http.NewRequest(http.MethodPost, "google.com", strings.NewReader("Hello, world!"))
		require.NoError(t, err)

		req.Header.Add("Uber-Trace-Id", "some-trace-id")

		_, err = cli.Do(req)
		require.NoError(t, err)
		require.NotNil(t, gotReq)

		require.Equal(t, origReq.Header.Get("Authorization"), gotReq.Header.Get("Authorization"))
	})
}
@ -115,6 +115,7 @@ type Expander struct {
	name    string
	data    interface{}
	funcMap text_template.FuncMap
	options []string
}

// NewTemplateExpander returns a template expander ready to use.

@ -126,7 +127,11 @@ func NewTemplateExpander(
	timestamp model.Time,
	queryFunc QueryFunc,
	externalURL *url.URL,
	options []string,
) *Expander {
	if options == nil {
		options = []string{"missingkey=zero"}
	}
	return &Expander{
		text: text,
		name: name,

@ -291,6 +296,7 @@ func NewTemplateExpander(
				return externalURL.String()
			},
		},
		options: options,
	}
}

@ -336,7 +342,9 @@ func (te Expander) Expand() (result string, resultErr error) {

	templateTextExpansionTotal.Inc()

	tmpl, err := text_template.New(te.name).Funcs(te.funcMap).Option("missingkey=zero").Parse(te.text)
	tmpl := text_template.New(te.name).Funcs(te.funcMap)
	tmpl.Option(te.options...)
	tmpl, err := tmpl.Parse(te.text)
	if err != nil {
		return "", errors.Wrapf(err, "error parsing template %v", te.name)
	}

@ -361,7 +369,7 @@ func (te Expander) ExpandHTML(templateFiles []string) (result string, resultErr
	}()

	tmpl := html_template.New(te.name).Funcs(html_template.FuncMap(te.funcMap))
	tmpl.Option("missingkey=zero")
	tmpl.Option(te.options...)
	tmpl.Funcs(html_template.FuncMap{
		"tmpl": func(name string, data interface{}) (html_template.HTML, error) {
			var buffer bytes.Buffer
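A sketch of the new trailing parameter, following the call shape used in the tests below; queryFunc and extURL stand in for values the caller already has:

```go
// nil options keep the old behaviour ("missingkey=zero"); an explicit
// option makes a missing key an execution error instead of "<no value>".
expander := NewTemplateExpander(
	context.Background(), "{{ .Foo }}", "example", nil,
	0, queryFunc, extURL,
	[]string{"missingkey=error"},
)
_, err := expander.Expand() // fails: nil data; no entry for key "Foo"
```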
@ -31,6 +31,7 @@ func TestTemplateExpansion(t *testing.T) {
		text        string
		output      string
		input       interface{}
		options     []string
		queryResult promql.Vector
		shouldFail  bool
		html        bool

@ -50,7 +51,7 @@ func TestTemplateExpansion(t *testing.T) {
			// Non-ASCII space (not allowed in text/template, see https://github.com/golang/go/blob/master/src/text/template/parse/lex.go#L98)
			text:       "{{ }}",
			shouldFail: true,
			errorMsg:   "error parsing template test: template: test:1: unexpected unrecognized character in action: U+00A0 in command",
			errorMsg:   "error parsing template test: template: test:1: unrecognized character in action: U+00A0",
		},
		{
			// HTML escaping.

@ -153,11 +154,50 @@ func TestTemplateExpansion(t *testing.T) {
			}},
			output: "a:11: b:21: ",
		},
		{
			// Missing value is no value for nil options.
			text:   "{{ .Foo }}",
			output: "<no value>",
		},
		{
			// Missing value is no value for no options.
			text:    "{{ .Foo }}",
			options: make([]string, 0),
			output:  "<no value>",
		},
		{
			// Assert that missing value returns error with missingkey=error.
			text:       "{{ .Foo }}",
			options:    []string{"missingkey=error"},
			shouldFail: true,
			errorMsg:   `error executing template test: template: test:1:3: executing "test" at <.Foo>: nil data; no entry for key "Foo"`,
		},
		{
			// Missing value is "" for nil options in ExpandHTML.
			text:   "{{ .Foo }}",
			output: "",
			html:   true,
		},
		{
			// Missing value is "" for no options in ExpandHTML.
			text:    "{{ .Foo }}",
			options: make([]string, 0),
			output:  "",
			html:    true,
		},
		{
			// Assert that missing value returns error with missingkey=error in ExpandHTML.
			text:       "{{ .Foo }}",
			options:    []string{"missingkey=error"},
			shouldFail: true,
			errorMsg:   `error executing template test: template: test:1:3: executing "test" at <.Foo>: nil data; no entry for key "Foo"`,
			html:       true,
		},
		{
			// Unparsable template.
			text:       "{{",
			shouldFail: true,
			errorMsg:   "error parsing template test: template: test:1: unexpected unclosed action in command",
			errorMsg:   "error parsing template test: template: test:1: unclosed action",
		},
		{
			// Error in function.

@ -194,7 +234,7 @@ func TestTemplateExpansion(t *testing.T) {
			// Humanize - string with error.
			text:       `{{ humanize "one" }}`,
			shouldFail: true,
			errorMsg:   `strconv.ParseFloat: parsing "one": invalid syntax`,
			errorMsg:   `error executing template test: template: test:1:3: executing "test" at <humanize "one">: error calling humanize: strconv.ParseFloat: parsing "one": invalid syntax`,
		},
		{
			// Humanize1024 - float64.

@ -212,7 +252,7 @@ func TestTemplateExpansion(t *testing.T) {
			// Humanize1024 - string with error.
			text:       `{{ humanize1024 "one" }}`,
			shouldFail: true,
			errorMsg:   `strconv.ParseFloat: parsing "one": invalid syntax`,
			errorMsg:   `error executing template test: template: test:1:3: executing "test" at <humanize1024 "one">: error calling humanize1024: strconv.ParseFloat: parsing "one": invalid syntax`,
		},
		{
			// HumanizeDuration - seconds - float64.

@ -242,7 +282,7 @@ func TestTemplateExpansion(t *testing.T) {
			// HumanizeDuration - string with error.
			text:       `{{ humanizeDuration "one" }}`,
			shouldFail: true,
			errorMsg:   `strconv.ParseFloat: parsing "one": invalid syntax`,
			errorMsg:   `error executing template test: template: test:1:3: executing "test" at <humanizeDuration "one">: error calling humanizeDuration: strconv.ParseFloat: parsing "one": invalid syntax`,
		},
		{
			// Humanize* Inf and NaN - float64.

@ -270,7 +310,7 @@ func TestTemplateExpansion(t *testing.T) {
			// HumanizePercentage - model.SampleValue input - string with error.
			text:       `{{ "one" | humanizePercentage }}`,
			shouldFail: true,
			errorMsg:   `strconv.ParseFloat: parsing "one": invalid syntax`,
			errorMsg:   `error executing template test: template: test:1:11: executing "test" at <humanizePercentage>: error calling humanizePercentage: strconv.ParseFloat: parsing "one": invalid syntax`,
		},
		{
			// HumanizeTimestamp - model.SampleValue input - float64.

@ -341,7 +381,7 @@ func TestTemplateExpansion(t *testing.T) {
		}
		var result string
		var err error
		expander := NewTemplateExpander(context.Background(), s.text, "test", s.input, 0, queryFunc, extURL)
		expander := NewTemplateExpander(context.Background(), s.text, "test", s.input, 0, queryFunc, extURL, s.options)
		if s.html {
			result, err = expander.ExpandHTML(nil)
		} else {

@ -349,13 +389,14 @@ func TestTemplateExpansion(t *testing.T) {
		}
		if s.shouldFail {
			require.Error(t, err, "%v", s.text)
			require.EqualError(t, err, s.errorMsg)
			continue
		}

		require.NoError(t, err)

		if err == nil {
			require.Equal(t, result, s.output)
			require.Equal(t, s.output, result)
		}
	}
}
@ -11,6 +11,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build !windows
// +build !windows

package chunks
@ -54,7 +54,7 @@ import (
)

func TestMain(m *testing.M) {
	goleak.VerifyTestMain(m, goleak.IgnoreTopFunction("github.com/prometheus/prometheus/tsdb.(*SegmentWAL).cut.func2"))
	goleak.VerifyTestMain(m, goleak.IgnoreTopFunction("github.com/prometheus/prometheus/tsdb.(*SegmentWAL).cut.func1"), goleak.IgnoreTopFunction("github.com/prometheus/prometheus/tsdb.(*SegmentWAL).cut.func2"))
}

func openTestDB(t testing.TB, opts *Options, rngs []int64) (db *DB) {
@ -3,6 +3,11 @@
Memory snapshot uses the WAL package and writes each series as a WAL record.
Below are the formats of the individual records.

The order of records in the snapshot is always:
1. Series records, one per series, in no particular order.
2. After all series are done, a single tombstone record containing all the tombstones.
3. At the end, one or more exemplar records, batching up the exemplars in each record. Exemplars are in the order they were written to the circular buffer.

### Series records

This record is a snapshot of a single series. Only one series exists per record.

@ -60,3 +65,30 @@ as tombstone file in blocks.
│ len(Encoded Tombstones) <uvarint> │ Encoded Tombstones <bytes> │
└───────────────────────────────────┴─────────────────────────────┘
```

### Exemplar record

A single exemplar record contains one or more exemplars, encoded in the same way as in the WAL but with a changed record type.

```
┌───────────────────────────────────────────────────────────────────┐
│ Record Type <byte> │
├───────────────────────────────────────────────────────────────────┤
│ ┌────────────────────┬───────────────────────────┐ │
│ │ series ref <8b> │ timestamp <8b> │ │
│ └────────────────────┴───────────────────────────┘ │
│ ┌─────────────────────┬───────────────────────────┬─────────────┐ │
│ │ ref_delta <uvarint> │ timestamp_delta <uvarint> │ value <8b> │ │
│ ├─────────────────────┴───────────────────────────┴─────────────┤ │
│ │ n = len(labels) <uvarint> │ │
│ ├───────────────────────────────┬───────────────────────────────┤ │
│ │ len(str_1) <uvarint> │ str_1 <bytes> │ │
│ ├───────────────────────────────┴───────────────────────────────┤ │
│ │ ... │ │
│ ├───────────────────────────────┬───────────────────────────────┤ │
│ │ len(str_2n) <uvarint> │ str_2n <bytes> │ │
│ ├───────────────────────────────┴───────────────────────────────┤ │
│ . . . │
└───────────────────────────────────────────────────────────────────┘
```
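Since every record begins with a type byte, replay can dispatch on it directly. A minimal sketch, assuming the record-type constants this commit introduces in head_wal.go (series = 1, tombstones = 2, exemplars = 3) and a wal.Reader named r over the snapshot:

```go
for r.Next() {
	rec := r.Record()
	switch rec[0] {
	case chunkSnapshotRecordTypeSeries: // one series per record, unsorted
	case chunkSnapshotRecordTypeTombstones: // a single record after all series
	case chunkSnapshotRecordTypeExemplars: // one or more records at the end
	default:
		// Unknown type: written by a newer or older version of the code.
	}
}
```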
@ -279,7 +279,7 @@ func (ce *CircularExemplarStorage) Resize(l int64) int {

	migrated := 0

	if l > 0 {
	if l > 0 && len(oldBuffer) > 0 {
		// Rewind previous next index by count with wrap-around.
		// This math is essentially looking at nextIndex, where we would write the next exemplar to,
		// and finds the index in the old exemplar buffer that we should start migrating exemplars from.
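A sketch of that wrap-around rewind with hypothetical names, where count is how many of the old exemplars fit into the new buffer:

```go
// Step back `count` slots from the old nextIndex, wrapping around.
startIndex := (oldNextIndex - count + len(oldBuffer)) % len(oldBuffer)
// The added len(oldBuffer) > 0 guard avoids a modulo-by-zero here when
// growing from an empty (size 0) buffer, as the GrowFromZero test checks.
```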
@ -400,3 +400,23 @@ func (ce *CircularExemplarStorage) computeMetrics() {
		ce.metrics.lastExemplarsTs.Set(float64(ce.exemplars[0].exemplar.Ts) / 1000)
	}
}

// IterateExemplars iterates through all the exemplars from oldest to newest appended and calls
// the given function on each of them, until the end or until the first call that returns an error.
func (ce *CircularExemplarStorage) IterateExemplars(f func(seriesLabels labels.Labels, e exemplar.Exemplar) error) error {
	ce.lock.RLock()
	defer ce.lock.RUnlock()

	idx := ce.nextIndex
	l := len(ce.exemplars)
	for i := 0; i < l; i, idx = i+1, (idx+1)%l {
		if ce.exemplars[idx] == nil {
			continue
		}
		err := f(ce.exemplars[idx].ref.seriesLabels, ce.exemplars[idx].exemplar)
		if err != nil {
			return err
		}
	}
	return nil
}
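A sketch of draining the iterator, mirroring how the snapshot code and tests in this commit collect exemplars; es is assumed to be a *CircularExemplarStorage:

```go
var collected []exemplar.Exemplar
err := es.IterateExemplars(func(l labels.Labels, e exemplar.Exemplar) error {
	collected = append(collected, e) // arrives oldest to newest
	return nil
})
```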
@ -413,7 +413,7 @@ func TestResize(t *testing.T) {
			expectedMigrated: 50,
		},
		{
			name:             "Zero",
			name:             "ShrinkToZero",
			startSize:        100,
			newCount:         0,
			expectedSeries:   []int{},

@ -436,6 +436,14 @@ func TestResize(t *testing.T) {
			notExpectedSeries: []int{},
			expectedMigrated:  0,
		},
		{
			name:              "GrowFromZero",
			startSize:         0,
			newCount:          10,
			expectedSeries:    []int{},
			notExpectedSeries: []int{},
			expectedMigrated:  0,
		},
	}

	for _, tc := range testCases {

@ -477,16 +485,27 @@ func TestResize(t *testing.T) {
	}
}

func BenchmarkAddExemplar(t *testing.B) {
	exs, err := NewCircularExemplarStorage(int64(t.N), eMetrics)
	require.NoError(t, err)
	es := exs.(*CircularExemplarStorage)
func BenchmarkAddExemplar(b *testing.B) {
	// We need to include these labels since we do length calculation
	// before adding.
	exLabels := labels.Labels{{Name: "traceID", Value: "89620921"}}

	for i := 0; i < t.N; i++ {
		l := labels.FromStrings("service", strconv.Itoa(i))
	for _, n := range []int{10000, 100000, 1000000} {
		b.Run(fmt.Sprintf("%d", n), func(b *testing.B) {
			exs, err := NewCircularExemplarStorage(int64(n), eMetrics)
			require.NoError(b, err)
			es := exs.(*CircularExemplarStorage)

		err = es.AddExemplar(l, exemplar.Exemplar{Value: float64(i), Ts: int64(i)})
		require.NoError(t, err)
			b.ResetTimer()
			l := labels.Labels{{Name: "service", Value: strconv.Itoa(0)}}
			for i := 0; i < n; i++ {
				if i%100 == 0 {
					l = labels.Labels{{Name: "service", Value: strconv.Itoa(i)}}
				}
				err = es.AddExemplar(l, exemplar.Exemplar{Value: float64(i), Ts: int64(i), Labels: exLabels})
				require.NoError(b, err)
			}
		})
	}
}
@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build !windows
// +build !windows

package chunks

@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build windows
// +build windows

package fileutil

@ -11,6 +11,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build solaris
// +build solaris

package fileutil

@ -11,6 +11,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build darwin || dragonfly || freebsd || linux || netbsd || openbsd
// +build darwin dragonfly freebsd linux netbsd openbsd

package fileutil

@ -11,6 +11,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build windows
// +build windows

package fileutil

@ -11,6 +11,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build windows
// +build windows

package fileutil

@ -11,6 +11,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build !windows && !plan9
// +build !windows,!plan9

package fileutil

@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build !linux && !darwin
// +build !linux,!darwin

package fileutil

@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build !linux && !darwin
// +build !linux,!darwin

package fileutil

@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build darwin
// +build darwin

package fileutil

@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build linux
// +build linux

package fileutil

@ -11,6 +11,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build go1.12
// +build go1.12

// Package goversion enforces the go version supported by the tsdb module.
34
tsdb/head.go
@ -79,9 +79,6 @@ type Head struct {
	// All series addressable by their ID or hash.
	series *stripeSeries

	symMtx  sync.RWMutex
	symbols map[string]struct{}

	deletedMtx sync.Mutex
	deleted    map[uint64]int // Deleted series, and what WAL segment they must be kept until.

@ -113,6 +110,7 @@ type ExemplarStorage interface {
	storage.ExemplarQueryable
	AddExemplar(labels.Labels, exemplar.Exemplar) error
	ValidateExemplar(labels.Labels, exemplar.Exemplar) error
	IterateExemplars(f func(seriesLabels labels.Labels, e exemplar.Exemplar) error) error
}

// HeadOptions are parameters for the Head block.

@ -222,7 +220,6 @@ func (h *Head) resetInMemoryState() error {
	h.exemplarMetrics = em
	h.exemplars = es
	h.series = newStripeSeries(h.opts.StripeSize, h.opts.SeriesCallback)
	h.symbols = map[string]struct{}{}
	h.postings = index.NewUnorderedMemPostings()
	h.tombstones = tombstones.NewMemTombstones()
	h.iso = newIsolation()

@ -454,7 +451,7 @@ const cardinalityCacheExpirationTime = time.Duration(30) * time.Second
// Init loads data from the write ahead log and prepares the head for writes.
// It should be called before using an appender so that it
// limits the ingested samples to the head min valid time.
func (h *Head) Init(minValidTime int64) (err error) {
func (h *Head) Init(minValidTime int64) error {
	h.minValidTime.Store(minValidTime)
	defer h.postings.EnsureOrder()
	defer h.gc() // After loading the wal remove the obsolete data from the head.

@ -474,6 +471,7 @@ func (h *Head) Init(minValidTime int64) (err error) {

	if h.opts.EnableMemorySnapshotOnShutdown {
		level.Info(h.logger).Log("msg", "Chunk snapshot is enabled, replaying from the snapshot")
		var err error
		snapIdx, snapOffset, refSeries, err = h.loadChunkSnapshot()
		if err != nil {
			snapIdx, snapOffset = -1, 0

@ -523,7 +521,7 @@ func (h *Head) Init(minValidTime int64) (err error) {
	h.startWALReplayStatus(startFrom, endAt)

	multiRef := map[uint64]uint64{}
	if err == nil {
	if err == nil && startFrom >= snapIdx {
		sr, err := wal.NewSegmentsReader(dir)
		if err != nil {
			return errors.Wrap(err, "open checkpoint")

@ -1118,22 +1116,6 @@ func (h *Head) gc() int64 {
		h.deletedMtx.Unlock()
	}

	// Rebuild symbols and label value indices from what is left in the postings terms.
	// symMtx ensures that append of symbols and postings is disabled for rebuild time.
	h.symMtx.Lock()
	defer h.symMtx.Unlock()

	symbols := make(map[string]struct{}, len(h.symbols))
	if err := h.postings.Iter(func(l labels.Label, _ index.Postings) error {
		symbols[l.Name] = struct{}{}
		symbols[l.Value] = struct{}{}
		return nil
	}); err != nil {
		// This should never happen, as the iteration function only returns nil.
		panic(err)
	}
	h.symbols = symbols

	return actualMint
}

@ -1232,14 +1214,6 @@ func (h *Head) getOrCreateWithID(id, hash uint64, lset labels.Labels) (*memSerie
	h.metrics.seriesCreated.Inc()
	h.numSeries.Inc()

	h.symMtx.Lock()
	defer h.symMtx.Unlock()

	for _, l := range lset {
		h.symbols[l.Name] = struct{}{}
		h.symbols[l.Value] = struct{}{}
	}

	h.postings.Add(id, lset)
	return s, true, nil
}
@ -85,6 +85,7 @@ func (a *initAppender) GetRef(lset labels.Labels) (uint64, labels.Labels) {

func (a *initAppender) Commit() error {
	if a.app == nil {
		a.head.metrics.activeAppenders.Dec()
		return nil
	}
	return a.app.Commit()

@ -92,6 +93,7 @@ func (a *initAppender) Commit() error {

func (a *initAppender) Rollback() error {
	if a.app == nil {
		a.head.metrics.activeAppenders.Dec()
		return nil
	}
	return a.app.Rollback()
@ -54,16 +54,7 @@ func (h *headIndexReader) Close() error {
}

func (h *headIndexReader) Symbols() index.StringIter {
	h.head.symMtx.RLock()
	res := make([]string, 0, len(h.head.symbols))

	for s := range h.head.symbols {
		res = append(res, s)
	}
	h.head.symMtx.RUnlock()

	sort.Strings(res)
	return index.NewStringListIter(res)
	return h.head.postings.Symbols()
}

// SortedLabelValues returns label values present in the head for the

@ -88,8 +79,6 @@ func (h *headIndexReader) LabelValues(name string, matchers ...*labels.Matcher)
	}

	if len(matchers) == 0 {
		h.head.symMtx.RLock()
		defer h.head.symMtx.RUnlock()
		return h.head.postings.LabelValues(name), nil
	}

@ -104,10 +93,7 @@ func (h *headIndexReader) LabelNames(matchers ...*labels.Matcher) ([]string, err
	}

	if len(matchers) == 0 {
		h.head.symMtx.RLock()
		labelNames := h.head.postings.LabelNames()
		h.head.symMtx.RUnlock()

		sort.Strings(labelNames)
		return labelNames, nil
	}
@ -25,6 +25,7 @@ import (
	"path/filepath"
	"sort"
	"strconv"
	"strings"
	"sync"
	"testing"
	"time"
@ -403,6 +404,39 @@ func TestHead_WALMultiRef(t *testing.T) {
	}}, series)
}

func TestHead_ActiveAppenders(t *testing.T) {
	head, _ := newTestHead(t, 1000, false)
	defer head.Close()

	require.NoError(t, head.Init(0))

	// First rollback with no samples.
	app := head.Appender(context.Background())
	require.Equal(t, 1.0, prom_testutil.ToFloat64(head.metrics.activeAppenders))
	require.NoError(t, app.Rollback())
	require.Equal(t, 0.0, prom_testutil.ToFloat64(head.metrics.activeAppenders))

	// Then commit with no samples.
	app = head.Appender(context.Background())
	require.NoError(t, app.Commit())
	require.Equal(t, 0.0, prom_testutil.ToFloat64(head.metrics.activeAppenders))

	// Now rollback with one sample.
	app = head.Appender(context.Background())
	_, err := app.Append(0, labels.FromStrings("foo", "bar"), 100, 1)
	require.NoError(t, err)
	require.Equal(t, 1.0, prom_testutil.ToFloat64(head.metrics.activeAppenders))
	require.NoError(t, app.Rollback())
	require.Equal(t, 0.0, prom_testutil.ToFloat64(head.metrics.activeAppenders))

	// Now commit with one sample.
	app = head.Appender(context.Background())
	_, err = app.Append(0, labels.FromStrings("foo", "bar"), 100, 1)
	require.NoError(t, err)
	require.NoError(t, app.Commit())
	require.Equal(t, 0.0, prom_testutil.ToFloat64(head.metrics.activeAppenders))
}

func TestHead_UnknownWALRecord(t *testing.T) {
	head, w := newTestHead(t, 1000, false)
	w.Log([]byte{255, 42})
@ -470,13 +504,14 @@ func TestHead_Truncate(t *testing.T) {
	require.Nil(t, postingsB2)
	require.Nil(t, postingsC1)

	require.Equal(t, map[string]struct{}{
		"":  {}, // from 'all' postings list
		"a": {},
		"b": {},
		"1": {},
		"2": {},
	}, h.symbols)
	iter := h.postings.Symbols()
	symbols := []string{}
	for iter.Next() {
		symbols = append(symbols, iter.At())
	}
	require.Equal(t,
		[]string{"" /* from 'all' postings list */, "1", "2", "a", "b"},
		symbols)

	values := map[string]map[string]struct{}{}
	for _, name := range h.postings.LabelNames() {
@ -2564,9 +2599,88 @@ func TestChunkSnapshot(t *testing.T) {
		require.NoError(t, head.Close())
	}()

	type ex struct {
		seriesLabels labels.Labels
		e            exemplar.Exemplar
	}

	numSeries := 10
	expSeries := make(map[string][]tsdbutil.Sample)
	expTombstones := make(map[uint64]tombstones.Intervals)
	expExemplars := make([]ex, 0)

	addExemplar := func(app storage.Appender, ref uint64, lbls labels.Labels, ts int64) {
		e := ex{
			seriesLabels: lbls,
			e: exemplar.Exemplar{
				Labels: labels.Labels{{Name: "traceID", Value: fmt.Sprintf("%d", rand.Int())}},
				Value:  rand.Float64(),
				Ts:     ts,
			},
		}
		expExemplars = append(expExemplars, e)
		_, err := app.AppendExemplar(ref, e.seriesLabels, e.e)
		require.NoError(t, err)
	}

	checkSamples := func() {
		q, err := NewBlockQuerier(head, math.MinInt64, math.MaxInt64)
		require.NoError(t, err)
		series := query(t, q, labels.MustNewMatcher(labels.MatchRegexp, "foo", ".*"))
		require.Equal(t, expSeries, series)
	}
	checkTombstones := func() {
		tr, err := head.Tombstones()
		require.NoError(t, err)
		actTombstones := make(map[uint64]tombstones.Intervals)
		require.NoError(t, tr.Iter(func(ref uint64, itvs tombstones.Intervals) error {
			for _, itv := range itvs {
				actTombstones[ref].Add(itv)
			}
			return nil
		}))
		require.Equal(t, expTombstones, actTombstones)
	}
	checkExemplars := func() {
		actExemplars := make([]ex, 0, len(expExemplars))
		err := head.exemplars.IterateExemplars(func(seriesLabels labels.Labels, e exemplar.Exemplar) error {
			actExemplars = append(actExemplars, ex{
				seriesLabels: seriesLabels,
				e:            e,
			})
			return nil
		})
		require.NoError(t, err)
		// Verifies both existence of the right exemplars and the order of exemplars in the buffer.
		require.Equal(t, expExemplars, actExemplars)
	}

	var (
		wlast, woffset int
		err            error
	)

	closeHeadAndCheckSnapshot := func() {
		require.NoError(t, head.Close())

		_, sidx, soffset, err := LastChunkSnapshot(head.opts.ChunkDirRoot)
		require.NoError(t, err)
		require.Equal(t, wlast, sidx)
		require.Equal(t, woffset, soffset)
	}

	openHeadAndCheckReplay := func() {
		w, err := wal.NewSize(nil, nil, head.wal.Dir(), 32768, false)
		require.NoError(t, err)
		head, err = NewHead(nil, nil, w, head.opts, nil)
		require.NoError(t, err)
		require.NoError(t, head.Init(math.MinInt64))

		checkSamples()
		checkTombstones()
		checkExemplars()
	}

	{ // Initial data that goes into snapshot.
		// Add some initial samples with >=1 m-map chunk.
		app := head.Appender(context.Background())

@ -2577,11 +2691,12 @@ func TestChunkSnapshot(t *testing.T) {
		for ts := int64(1); ts <= 200; ts++ {
			val := rand.Float64()
			expSeries[lblStr] = append(expSeries[lblStr], sample{ts, val})
			_, err := app.Append(0, lbls, ts, val)
			ref, err := app.Append(0, lbls, ts, val)
			require.NoError(t, err)

			// To create multiple WAL records.
			// Add an exemplar, and commit to create multiple WAL records.
			if ts%10 == 0 {
				addExemplar(app, ref, lbls, ts)
				require.NoError(t, app.Commit())
				app = head.Appender(context.Background())
			}

@ -2606,47 +2721,20 @@ func TestChunkSnapshot(t *testing.T) {
			}, nil))
			require.NoError(t, err)
		}

	}

	// These references should be the ones used for the snapshot.
	wlast, woffset, err := head.wal.LastSegmentAndOffset()
	wlast, woffset, err = head.wal.LastSegmentAndOffset()
	require.NoError(t, err)

	{ // Creating snapshot and verifying it.
	{
		// Creating snapshot and verifying it.
		head.opts.EnableMemorySnapshotOnShutdown = true
		require.NoError(t, head.Close()) // This will create a snapshot.
		closeHeadAndCheckSnapshot() // This will create a snapshot.

		_, sidx, soffset, err := LastChunkSnapshot(head.opts.ChunkDirRoot)
		require.NoError(t, err)
		require.Equal(t, wlast, sidx)
		require.Equal(t, woffset, soffset)
	}

	{ // Test the replay of snapshot.
		// Create new Head which should replay this snapshot.
		w, err := wal.NewSize(nil, nil, head.wal.Dir(), 32768, false)
		require.NoError(t, err)
		head, err = NewHead(nil, nil, w, head.opts, nil)
		require.NoError(t, err)
		require.NoError(t, head.Init(math.MinInt64))

		// Test query for snapshot replay.
		q, err := NewBlockQuerier(head, math.MinInt64, math.MaxInt64)
		require.NoError(t, err)
		series := query(t, q, labels.MustNewMatcher(labels.MatchRegexp, "foo", ".*"))
		require.Equal(t, expSeries, series)

		// Check the tombstones.
		tr, err := head.Tombstones()
		require.NoError(t, err)
		actTombstones := make(map[uint64]tombstones.Intervals)
		require.NoError(t, tr.Iter(func(ref uint64, itvs tombstones.Intervals) error {
			for _, itv := range itvs {
				actTombstones[ref].Add(itv)
			}
			return nil
		}))
		require.Equal(t, expTombstones, actTombstones)
		// Test the replay of snapshot.
		openHeadAndCheckReplay()
	}

	{ // Additional data to only include in WAL and m-mapped chunks and not snapshot. This mimics having an old snapshot on disk.

@ -2660,11 +2748,12 @@ func TestChunkSnapshot(t *testing.T) {
		for ts := int64(201); ts <= 400; ts++ {
			val := rand.Float64()
			expSeries[lblStr] = append(expSeries[lblStr], sample{ts, val})
			_, err := app.Append(0, lbls, ts, val)
			ref, err := app.Append(0, lbls, ts, val)
			require.NoError(t, err)

			// To create multiple WAL records.
			// Add an exemplar, and commit to create multiple WAL records.
			if ts%10 == 0 {
				addExemplar(app, ref, lbls, ts)
				require.NoError(t, app.Commit())
				app = head.Appender(context.Background())
			}

@ -2691,43 +2780,42 @@ func TestChunkSnapshot(t *testing.T) {
		}
	}

	{ // Close Head and verify that new snapshot was not created.
	{
		// Close Head and verify that new snapshot was not created.
		head.opts.EnableMemorySnapshotOnShutdown = false
		require.NoError(t, head.Close()) // This should not create a snapshot.
		closeHeadAndCheckSnapshot() // This should not create a snapshot.

		_, sidx, soffset, err := LastChunkSnapshot(head.opts.ChunkDirRoot)
		require.NoError(t, err)
		require.Equal(t, wlast, sidx)
		require.Equal(t, woffset, soffset)
	}

	{ // Test the replay of snapshot, m-map chunks, and WAL.
		// Create new Head to replay snapshot, m-map chunks, and WAL.
		w, err := wal.NewSize(nil, nil, head.wal.Dir(), 32768, false)
		require.NoError(t, err)
		// Test the replay of snapshot, m-map chunks, and WAL.
		head.opts.EnableMemorySnapshotOnShutdown = true // Enabled to read from snapshot.
		head, err = NewHead(nil, nil, w, head.opts, nil)
		require.NoError(t, err)
		require.NoError(t, head.Init(math.MinInt64))

		// Test query when data is replayed from snapshot, m-map chunks, and WAL.
		q, err := NewBlockQuerier(head, math.MinInt64, math.MaxInt64)
		require.NoError(t, err)
		series := query(t, q, labels.MustNewMatcher(labels.MatchRegexp, "foo", ".*"))
		require.Equal(t, expSeries, series)

		// Check the tombstones.
		tr, err := head.Tombstones()
		require.NoError(t, err)
		actTombstones := make(map[uint64]tombstones.Intervals)
		require.NoError(t, tr.Iter(func(ref uint64, itvs tombstones.Intervals) error {
			for _, itv := range itvs {
				actTombstones[ref].Add(itv)
			}
			return nil
		}))
		require.Equal(t, expTombstones, actTombstones)
		openHeadAndCheckReplay()
	}

	// Creating another snapshot should delete the older snapshot and replay still works fine.
	wlast, woffset, err = head.wal.LastSegmentAndOffset()
	require.NoError(t, err)

	{
		// Close Head and verify that new snapshot was created.
		closeHeadAndCheckSnapshot()

		// Verify that there is only 1 snapshot.
		files, err := ioutil.ReadDir(head.opts.ChunkDirRoot)
		require.NoError(t, err)
		snapshots := 0
		for i := len(files) - 1; i >= 0; i-- {
			fi := files[i]
			if strings.HasPrefix(fi.Name(), chunkSnapshotPrefix) {
				snapshots++
				require.Equal(t, chunkSnapshotDir(wlast, woffset), fi.Name())
			}
		}
		require.Equal(t, 1, snapshots)

		// Test the replay of snapshot.
		head.opts.EnableMemorySnapshotOnShutdown = true // Enabled to read from snapshot.
		openHeadAndCheckReplay()
	}
}

func TestSnapshotError(t *testing.T) {
149
tsdb/head_wal.go
@ -114,6 +114,7 @@ func (h *Head) loadWAL(r *wal.Reader, multiRef map[uint64]uint64, mmappedChunks
	wg.Add(1)
	exemplarsInput = make(chan record.RefExemplar, 300)
	go func(input <-chan record.RefExemplar) {
		var err error
		defer wg.Done()
		for e := range input {
			ms := h.series.getByID(e.Ref)

@ -413,6 +414,7 @@ func (h *Head) processWALSamples(
const (
	chunkSnapshotRecordTypeSeries     uint8 = 1
	chunkSnapshotRecordTypeTombstones uint8 = 2
	chunkSnapshotRecordTypeExemplars  uint8 = 3
)

type chunkSnapshotRecord struct {

@ -537,6 +539,10 @@ const chunkSnapshotPrefix = "chunk_snapshot."
// The chunk snapshot is stored in a directory named chunk_snapshot.N.M and is written
// using the WAL package. N is the last WAL segment present during snapshotting and
// M is the offset in segment N up to which data was written.
//
// The snapshot first contains all series (each in individual records and not sorted), followed by
// tombstones (a single record), and finally exemplars (>= 1 record). Exemplars are in the order they
// were written to the circular buffer.
func (h *Head) ChunkSnapshot() (*ChunkSnapshotStats, error) {
	if h.wal == nil {
		// If we are not storing any WAL, it does not make sense to take a snapshot either.
@ -563,7 +569,7 @@ func (h *Head) ChunkSnapshot() (*ChunkSnapshotStats, error) {
		return stats, nil
	}

	snapshotName := fmt.Sprintf(chunkSnapshotPrefix+"%06d.%010d", wlast, woffset)
	snapshotName := chunkSnapshotDir(wlast, woffset)

	cpdir := filepath.Join(h.opts.ChunkDirRoot, snapshotName)
	cpdirtmp := cpdir + ".tmp"

@ -587,6 +593,7 @@ func (h *Head) ChunkSnapshot() (*ChunkSnapshotStats, error) {
		buf  []byte
		recs [][]byte
	)
	// Add all series to the snapshot.
	stripeSize := h.series.size
	for i := 0; i < stripeSize; i++ {
		h.series.locks[i].RLock()
@ -622,11 +629,61 @@ func (h *Head) ChunkSnapshot() (*ChunkSnapshotStats, error) {
		return stats, errors.Wrap(err, "encode tombstones")
	}
	recs = append(recs, rec)

	// Flush remaining records.
	// Flush remaining series records and tombstones.
	if err := cp.Log(recs...); err != nil {
		return stats, errors.Wrap(err, "flush records")
	}
	buf = buf[:0]

	// Add exemplars in the snapshot.
	// We log in batches, with each record having up to 10000 exemplars.
	// Assuming 100 bytes (overestimate) per exemplar, that's ~1MB.
	maxExemplarsPerRecord := 10000
	batch := make([]record.RefExemplar, 0, maxExemplarsPerRecord)
	enc := record.Encoder{}
	flushExemplars := func() error {
		if len(batch) == 0 {
			return nil
		}
		buf = buf[:0]
		encbuf := encoding.Encbuf{B: buf}
		encbuf.PutByte(chunkSnapshotRecordTypeExemplars)
		enc.EncodeExemplarsIntoBuffer(batch, &encbuf)
		if err := cp.Log(encbuf.Get()); err != nil {
			return errors.Wrap(err, "log exemplars")
		}
		buf, batch = buf[:0], batch[:0]
		return nil
	}
	err = h.exemplars.IterateExemplars(func(seriesLabels labels.Labels, e exemplar.Exemplar) error {
		if len(batch) >= maxExemplarsPerRecord {
			if err := flushExemplars(); err != nil {
				return errors.Wrap(err, "flush exemplars")
			}
		}

		ms := h.series.getByHash(seriesLabels.Hash(), seriesLabels)
		if ms == nil {
			// It is possible that the exemplar refers to some old series. We discard such exemplars.
			return nil
		}
		batch = append(batch, record.RefExemplar{
			Ref:    ms.ref,
			T:      e.Ts,
			V:      e.Value,
			Labels: e.Labels,
		})
		return nil
	})
	if err != nil {
		return stats, errors.Wrap(err, "iterate exemplars")
	}

	// Flush remaining exemplars.
	if err := flushExemplars(); err != nil {
		return stats, errors.Wrap(err, "flush exemplars at the end")
	}

	if err := cp.Close(); err != nil {
		return stats, errors.Wrap(err, "close chunk snapshot")
	}
@ -634,7 +691,7 @@ func (h *Head) ChunkSnapshot() (*ChunkSnapshotStats, error) {
		return stats, errors.Wrap(err, "rename chunk snapshot directory")
	}

	if err := DeleteChunkSnapshots(h.opts.ChunkDirRoot, cslast, csoffset); err != nil {
	if err := DeleteChunkSnapshots(h.opts.ChunkDirRoot, wlast, woffset); err != nil {
		// Leftover old chunk snapshots do not cause problems down the line beyond
		// occupying disk space.
		// They will just be ignored since a higher chunk snapshot exists.

@ -643,6 +700,10 @@ func (h *Head) ChunkSnapshot() (*ChunkSnapshotStats, error) {
	return stats, nil
}

func chunkSnapshotDir(wlast, woffset int) string {
	return fmt.Sprintf(chunkSnapshotPrefix+"%06d.%010d", wlast, woffset)
}

func (h *Head) performChunkSnapshot() error {
	level.Info(h.logger).Log("msg", "creating chunk snapshot")
	startTime := time.Now()
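For illustration, the name this helper produces for hypothetical inputs, per the %06d/%010d format above:

```go
// WAL segment 5, byte offset 12345 within that segment:
fmt.Println(chunkSnapshotDir(5, 12345)) // chunk_snapshot.000005.0000012345
```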
@ -667,8 +728,9 @@ func LastChunkSnapshot(dir string) (string, int, int, error) {
	if err != nil {
		return "", 0, 0, err
	}
	// Traverse list backwards since there may be multiple chunk snapshots left.
	for i := len(files) - 1; i >= 0; i-- {
	maxIdx, maxOffset := -1, -1
	maxFileName := ""
	for i := 0; i < len(files); i++ {
		fi := files[i]

		if !strings.HasPrefix(fi.Name(), chunkSnapshotPrefix) {

@ -693,9 +755,15 @@ func LastChunkSnapshot(dir string) (string, int, int, error) {
			continue
		}

		return filepath.Join(dir, fi.Name()), idx, offset, nil
		if idx > maxIdx || (idx == maxIdx && offset > maxOffset) {
			maxIdx, maxOffset = idx, offset
			maxFileName = filepath.Join(dir, fi.Name())
		}
	}
	return "", 0, 0, record.ErrNotFound
	if maxFileName == "" {
		return "", 0, 0, record.ErrNotFound
	}
	return maxFileName, maxIdx, maxOffset, nil
}

// DeleteChunkSnapshots deletes all chunk snapshots in a directory below a given index.

@ -726,7 +794,7 @@ func DeleteChunkSnapshots(dir string, maxIndex, maxOffset int) error {
			continue
		}

		if idx <= maxIndex && offset < maxOffset {
		if idx < maxIndex || (idx == maxIndex && offset < maxOffset) {
			if err := os.RemoveAll(filepath.Join(dir, fi.Name())); err != nil {
				errs.Add(err)
			}
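A worked example of the (index, offset) ordering both functions now share, with hypothetical values:

```go
// The DeleteChunkSnapshots predicate, extracted for illustration.
less := func(idx, offset, maxIdx, maxOffset int) bool {
	return idx < maxIdx || (idx == maxIdx && offset < maxOffset)
}
fmt.Println(less(4, 900, 5, 200)) // true: older segment, so deleted
fmt.Println(less(5, 100, 5, 200)) // true: same segment, lower offset
fmt.Println(less(5, 200, 5, 200)) // false: the snapshot that is kept
```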
@ -766,6 +834,9 @@ func (h *Head) loadChunkSnapshot() (int, int, map[uint64]*memSeries, error) {
|
|||
recordChan = make(chan chunkSnapshotRecord, 5*n)
|
||||
shardedRefSeries = make([]map[uint64]*memSeries, n)
|
||||
errChan = make(chan error, n)
|
||||
refSeries map[uint64]*memSeries
|
||||
exemplarBuf []record.RefExemplar
|
||||
dec record.Decoder
|
||||
)
|
||||
|
||||
wg.Add(n)
|
||||
|
@@ -852,15 +923,58 @@ Outer:
				loopErr = errors.Wrap(err, "iterate tombstones")
				break Outer
			}

		case chunkSnapshotRecordTypeExemplars:
			// Exemplars are at the end of the snapshot, so all series are loaded at this point.
			if len(refSeries) == 0 {
				close(recordChan)
				wg.Wait()

				refSeries = make(map[uint64]*memSeries, numSeries)
				for _, shard := range shardedRefSeries {
					for k, v := range shard {
						refSeries[k] = v
					}
				}
			}

			decbuf := encoding.Decbuf{B: rec[1:]}

			exemplarBuf = exemplarBuf[:0]
			exemplarBuf, err = dec.ExemplarsFromBuffer(&decbuf, exemplarBuf)
			if err != nil {
				loopErr = errors.Wrap(err, "exemplars from buffer")
				break Outer
			}

			for _, e := range exemplarBuf {
				ms, ok := refSeries[e.Ref]
				if !ok {
					unknownRefs++
					continue
				}

				if err := h.exemplars.AddExemplar(ms.lset, exemplar.Exemplar{
					Labels: e.Labels,
					Value:  e.V,
					Ts:     e.T,
				}); err != nil {
					loopErr = errors.Wrap(err, "append exemplar")
					break Outer
				}
			}

		default:
			// This is a record type we don't understand. It is either an old format from an
			// earlier version, or a new format from a newer version that the code was rolled back from.
			loopErr = errors.Errorf("unsupported snapshot record type 0b%b", rec[0])
			break Outer
		}
	}
	close(recordChan)
	wg.Wait()
	if len(refSeries) == 0 {
		close(recordChan)
		wg.Wait()
	}

	close(errChan)
	merr := tsdb_errors.NewMulti(errors.Wrap(loopErr, "decode loop"))
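Since exemplar records only appear at the tail of the snapshot, the loader can drain the workers the first time it sees one: close the record channel, wait on the WaitGroup, then flatten the per-shard maps into a single lookup table. The merge itself reduces to the following pattern — the same types as the diff, but written as a standalone helper for clarity:

// mergeShards flattens the per-worker maps into one map sized up front.
func mergeShards(shards []map[uint64]*memSeries, total int) map[uint64]*memSeries {
	merged := make(map[uint64]*memSeries, total)
	for _, shard := range shards {
		for ref, s := range shard {
			merged[ref] = s
		}
	}
	return merged
}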
@@ -875,10 +989,13 @@ Outer:
		return -1, -1, nil, errors.Wrap(r.Err(), "read records")
	}

	refSeries := make(map[uint64]*memSeries, numSeries)
	for _, shard := range shardedRefSeries {
		for k, v := range shard {
			refSeries[k] = v
	if len(refSeries) == 0 {
		// We had no exemplar record, so we have to build the map here.
		refSeries = make(map[uint64]*memSeries, numSeries)
		for _, shard := range shardedRefSeries {
			for k, v := range shard {
				refSeries[k] = v
			}
		}
	}
@@ -58,6 +58,29 @@ func NewUnorderedMemPostings() *MemPostings {
	}
}

// Symbols returns an iterator over all unique name and value strings, in order.
func (p *MemPostings) Symbols() StringIter {
	p.mtx.RLock()

	// Add all the strings to a map to de-duplicate.
	symbols := make(map[string]struct{}, 512)
	for n, e := range p.m {
		symbols[n] = struct{}{}
		for v := range e {
			symbols[v] = struct{}{}
		}
	}
	p.mtx.RUnlock()

	res := make([]string, 0, len(symbols))
	for k := range symbols {
		res = append(res, k)
	}

	sort.Strings(res)
	return NewStringListIter(res)
}

// SortedKeys returns a list of sorted label keys of the postings.
func (p *MemPostings) SortedKeys() []labels.Label {
	p.mtx.RLock()
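Symbols collects every label name and value into a set to de-duplicate, sorts once outside the lock, and returns a StringIter. Consuming it follows the package's usual iterator shape — a sketch assuming p is a *MemPostings and the StringIter interface of Next/At/Err used elsewhere in the index package:

it := p.Symbols()
for it.Next() {
	fmt.Println(it.At()) // each unique symbol, in sorted order
}
if err := it.Err(); err != nil {
	// A list-backed iterator should not fail, but the interface allows it.
	panic(err)
}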
@@ -182,6 +182,11 @@ func (d *Decoder) Exemplars(rec []byte, exemplars []RefExemplar) ([]RefExemplar, error) {
	if t != Exemplars {
		return nil, errors.New("invalid record type")
	}

	return d.ExemplarsFromBuffer(&dec, exemplars)
}

func (d *Decoder) ExemplarsFromBuffer(dec *encoding.Decbuf, exemplars []RefExemplar) ([]RefExemplar, error) {
	if dec.Len() == 0 {
		return exemplars, nil
	}
@@ -287,6 +292,12 @@ func (e *Encoder) Exemplars(exemplars []RefExemplar, b []byte) []byte {
		return buf.Get()
	}

	e.EncodeExemplarsIntoBuffer(exemplars, &buf)

	return buf.Get()
}

func (e *Encoder) EncodeExemplarsIntoBuffer(exemplars []RefExemplar, buf *encoding.Encbuf) {
	// Store base timestamp and base reference number of first sample.
	// All samples encode their timestamp and ref as delta to those.
	first := exemplars[0]
@@ -305,6 +316,4 @@ func (e *Encoder) Exemplars(exemplars []RefExemplar, b []byte) []byte {
			buf.PutUvarintStr(l.Value)
		}
	}

	return buf.Get()
}
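Splitting encode and decode into buffer-level halves lets the snapshot code wrap an exemplar record in its own framing — a type byte followed by the record body — without going through a full WAL record. A sketch of the round trip using only names visible in this diff; the surrounding enc/dec values and the record-type constant are assumed to be in scope:

buf := encoding.Encbuf{}
buf.PutByte(chunkSnapshotRecordTypeExemplars) // snapshot framing: record type first
enc.EncodeExemplarsIntoBuffer(exemplars, &buf)
rec := buf.Get()

// Decoding mirrors it: strip the type byte, then reuse the record decoder.
decbuf := encoding.Decbuf{B: rec[1:]}
out, err := dec.ExemplarsFromBuffer(&decbuf, nil)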
@@ -11,6 +11,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build !windows
// +build !windows

package tsdb
Some files were not shown because too many files have changed in this diff.