Merge pull request #391 from grafana/krajo/merge-jan08-from-upstream

merge jan08 from upstream
Commit f3d1f7756f by George Krajcsovits, 2023-01-09 15:20:16 +01:00 (committed via GitHub).
158 changed files with 5731 additions and 2233 deletions


@ -20,11 +20,11 @@ jobs:
- name: install Go
uses: actions/setup-go@v2
with:
go-version: 1.18.x
go-version: '<1.19'
- name: Install snmp_exporter/generator dependencies
run: sudo apt-get update && sudo apt-get -y install libsnmp-dev
if: github.repository == 'prometheus/snmp_exporter'
- name: Lint
uses: golangci/golangci-lint-action@v3.2.0
uses: golangci/golangci-lint-action@v3.3.1
with:
version: v1.45.2
version: v1.50.1


@ -10,7 +10,13 @@ build:
path: ./cmd/prometheus
- name: promtool
path: ./cmd/promtool
flags: -a -tags netgo,builtinassets
tags:
all:
- netgo
- builtinassets
windows:
- builtinassets
flags: -a
ldflags: |
-X github.com/prometheus/common/version.Version={{.Version}}
-X github.com/prometheus/common/version.Revision={{.Revision}}


@ -1,5 +1,42 @@
# Changelog
## 2.41.0-rc.0 / 2022-12-14
* [FEATURE] Relabeling: Add `keepequal` and `dropequal` relabel actions. #11564
* [FEATURE] Add support for HTTP proxy headers. #11712
* [ENHANCEMENT] Reload private certificates when changed on disk. #11685
* [ENHANCEMENT] Add `max_version` to specify maximum TLS version in `tls_config`. #11685
* [ENHANCEMENT] Add `goos` and `goarch` labels to `prometheus_build_info`. #11685
* [ENHANCEMENT] SD: Add proxy support for EC2 and Lightsail SDs. #11611
* [ENHANCEMENT] SD: Add new metric `prometheus_sd_file_watcher_errors_total`. #11066
* [ENHANCEMENT] Remote Read: Use a pool to speed up marshalling. #11357
* [ENHANCEMENT] TSDB: Improve handling of tombstoned chunks in iterators. #11632
* [ENHANCEMENT] TSDB: Optimize postings offset table reading. #11535
* [BUGFIX] Scrape: Validate the metric name, label names, and label values after relabeling. #11074
* [BUGFIX] Remote Write receiver and rule manager: Fix error handling. #11727
## 2.40.7 / 2022-12-14
* [BUGFIX] Use Windows native DNS resolver. #11704
* [BUGFIX] TSDB: Fix queries involving negative buckets of native histograms. #11699
## 2.40.6 / 2022-12-09
* [SECURITY] Security upgrade from Go and upstream dependencies that include security fixes to the net/http and os packages. #11691
## 2.40.5 / 2022-12-01
* [BUGFIX] TSDB: Fix queries involving native histograms due to improper reset of iterators. #11643
## 2.40.4 / 2022-11-29
* [SECURITY] Fix basic authentication bypass vulnerability (CVE-2022-46146). GHSA-4v48-4q5m-8vx4
## 2.40.3 / 2022-11-23
* [BUGFIX] TSDB: Fix compaction after a deletion is called. #11623
## 2.40.2 / 2022-11-16
* [BUGFIX] UI: Fix black-on-black metric name color in dark mode. #11572


@ -64,10 +64,10 @@ To add or update a new dependency, use the `go get` command:
```bash
# Pick the latest tagged release.
go install example.com/some/module/pkg@latest
go get example.com/some/module/pkg@latest
# Pick a specific version.
go install example.com/some/module/pkg@vX.Y.Z
go get example.com/some/module/pkg@vX.Y.Z
```
Tidy up the `go.mod` and `go.sum` files:


@ -90,8 +90,10 @@ endif
.PHONY: npm_licenses
npm_licenses: ui-install
@echo ">> bundling npm licenses"
rm -f $(REACT_APP_NPM_LICENSES_TARBALL)
find $(UI_NODE_MODULES_PATH) -iname "license*" | tar cfj $(REACT_APP_NPM_LICENSES_TARBALL) --transform 's/^/npm_licenses\//' --files-from=-
rm -f $(REACT_APP_NPM_LICENSES_TARBALL) npm_licenses
ln -s . npm_licenses
find npm_licenses/$(UI_NODE_MODULES_PATH) -iname "license*" | tar cfj $(REACT_APP_NPM_LICENSES_TARBALL) --files-from=-
rm -f npm_licenses
.PHONY: tarball
tarball: npm_licenses common-tarball
@ -107,7 +109,7 @@ plugins/plugins.go: plugins.yml plugins/generate.go
plugins: plugins/plugins.go
.PHONY: build
build: assets npm_licenses assets-compress common-build plugins
build: assets npm_licenses assets-compress plugins common-build
.PHONY: bench_tsdb
bench_tsdb: $(PROMU)


@ -55,13 +55,13 @@ ifneq ($(shell which gotestsum),)
endif
endif
PROMU_VERSION ?= 0.13.0
PROMU_VERSION ?= 0.14.0
PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz
SKIP_GOLANGCI_LINT :=
GOLANGCI_LINT :=
GOLANGCI_LINT_OPTS ?=
GOLANGCI_LINT_VERSION ?= v1.49.0
GOLANGCI_LINT_VERSION ?= v1.50.1
# golangci-lint only supports linux, darwin and windows platforms on i386/amd64.
# windows isn't included here because of the path separator being different.
ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))


@ -138,8 +138,6 @@ make npm_licenses
make common-docker-amd64
```
*NB* if you are on a Mac, you will need [gnu-tar](https://formulae.brew.sh/formula/gnu-tar).
## Using Prometheus as a Go Library
### Remote Write


@ -45,7 +45,8 @@ Release cadence of first pre-releases being cut is 6 weeks.
| v2.38 | 2022-08-10 | Julius Volz (GitHub: @juliusv) |
| v2.39 | 2022-09-21 | Ganesh Vernekar (GitHub: @codesome) |
| v2.40 | 2022-11-02 | Ganesh Vernekar (GitHub: @codesome) |
| v2.41 | 2022-12-14 | **searching for volunteer** |
| v2.41 | 2022-12-14 | Julien Pivotto (GitHub: @roidelapluie) |
| v2.42 | 2023-01-25 | **searching for volunteer** |
If you are interested in volunteering please create a pull request against the [prometheus/prometheus](https://github.com/prometheus/prometheus) repository and propose yourself for the release series of your choice.


@ -1 +1 @@
2.40.2
2.41.0-rc.0


@ -1393,7 +1393,7 @@ func (n notReadyAppender) AppendExemplar(ref storage.SeriesRef, l labels.Labels,
return 0, tsdb.ErrNotReady
}
func (n notReadyAppender) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t int64, h *histogram.Histogram) (storage.SeriesRef, error) {
func (n notReadyAppender) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) {
return 0, tsdb.ErrNotReady
}
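The `AppendHistogram` signature change follows the upstream `storage.Appender` interface, which now carries both histogram flavours: callers pass either an integer histogram or a float histogram and leave the other argument nil. A minimal sketch of the convention (the wrapper function is illustrative, not part of this merge):

```go
package example

import (
	"github.com/prometheus/prometheus/model/histogram"
	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/storage"
)

// appendHistogramSample forwards exactly one of h/fh; the other stays nil.
// A zero ref asks the appender to resolve the series from the labels.
func appendHistogramSample(app storage.Appender, lbls labels.Labels, ts int64,
	h *histogram.Histogram, fh *histogram.FloatHistogram,
) (storage.SeriesRef, error) {
	return app.AppendHistogram(0, lbls, ts, h, fh)
}
```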


@ -49,7 +49,7 @@ func queryAllSeries(t testing.TB, q storage.Querier, expectedMinTime, expectedMa
samples := []backfillSample{}
for ss.Next() {
series := ss.At()
it := series.Iterator()
it := series.Iterator(nil)
require.NoError(t, it.Err())
for it.Next() == chunkenc.ValFloat {
ts, v := it.At()
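`Series.Iterator` now takes an iterator to reuse: `nil` allocates a fresh one (as in the test above), while passing the previous value back in lets hot loops avoid a per-series allocation. A sketch of the reuse pattern, assuming that signature:

```go
package example

import (
	"github.com/prometheus/prometheus/storage"
	"github.com/prometheus/prometheus/tsdb/chunkenc"
)

// sumFloatSamples recycles a single iterator across all series in the set.
func sumFloatSamples(ss storage.SeriesSet) float64 {
	var it chunkenc.Iterator // nil on the first pass
	var sum float64
	for ss.Next() {
		it = ss.At().Iterator(it) // reuse the previous iterator
		for it.Next() == chunkenc.ValFloat {
			_, v := it.At()
			sum += v
		}
	}
	return sum
}
```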


@ -631,9 +631,9 @@ func checkRules(filename string, lintSettings lintConfig) (int, []error) {
errMessage := fmt.Sprintf("%d duplicate rule(s) found.\n", len(dRules))
for _, n := range dRules {
errMessage += fmt.Sprintf("Metric: %s\nLabel(s):\n", n.metric)
for _, l := range n.label {
n.label.Range(func(l labels.Label) {
errMessage += fmt.Sprintf("\t%s: %s\n", l.Name, l.Value)
}
})
}
errMessage += "Might cause inconsistency while recording expressions"
return 0, []error{fmt.Errorf("%w %s", lintError, errMessage)}
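The move from `for _, l := range …` to `labels.Labels.Range` recurs throughout this merge (see `importRule` and `analyzeBlock` below): `labels.Labels` is becoming an opaque type, so callers iterate through a callback instead of indexing a slice. A minimal sketch, assuming the `Range(func(labels.Label))` signature used here:

```go
package example

import (
	"fmt"
	"strings"

	"github.com/prometheus/prometheus/model/labels"
)

// formatLabels renders a label set without assuming it is a slice.
func formatLabels(ls labels.Labels) string {
	var b strings.Builder
	ls.Range(func(l labels.Label) {
		fmt.Fprintf(&b, "\t%s: %s\n", l.Name, l.Value)
	})
	return b.String()
}
```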


@ -158,14 +158,15 @@ func (importer *ruleImporter) importRule(ctx context.Context, ruleExpr, ruleName
// Setting the rule labels after the output of the query,
// so they can override query output.
for _, l := range ruleLabels {
ruleLabels.Range(func(l labels.Label) {
lb.Set(l.Name, l.Value)
}
})
lb.Set(labels.MetricName, ruleName)
lbls := lb.Labels(labels.EmptyLabels())
for _, value := range sample.Values {
if err := app.add(ctx, lb.Labels(nil), timestamp.FromTime(value.Timestamp.Time()), float64(value.Value)); err != nil {
if err := app.add(ctx, lbls, timestamp.FromTime(value.Timestamp.Time()), float64(value.Value)); err != nil {
return fmt.Errorf("add: %w", err)
}
}


@ -100,7 +100,7 @@ func TestBackfillRuleIntegration(t *testing.T) {
require.Equal(t, 1, len(gRules))
require.Equal(t, "rule1", gRules[0].Name())
require.Equal(t, "ruleExpr", gRules[0].Query().String())
require.Equal(t, 1, len(gRules[0].Labels()))
require.Equal(t, 1, gRules[0].Labels().Len())
group2 := ruleImporter.groups[path2+";group2"]
require.NotNil(t, group2)
@ -109,7 +109,7 @@ func TestBackfillRuleIntegration(t *testing.T) {
require.Equal(t, 2, len(g2Rules))
require.Equal(t, "grp2_rule1", g2Rules[0].Name())
require.Equal(t, "grp2_rule1_expr", g2Rules[0].Query().String())
require.Equal(t, 0, len(g2Rules[0].Labels()))
require.Equal(t, 0, g2Rules[0].Labels().Len())
// Backfill all recording rules then check the blocks to confirm the correct data was created.
errs = ruleImporter.importAll(ctx)
@ -132,14 +132,14 @@ func TestBackfillRuleIntegration(t *testing.T) {
for selectedSeries.Next() {
seriesCount++
series := selectedSeries.At()
if len(series.Labels()) != 3 {
require.Equal(t, 2, len(series.Labels()))
if series.Labels().Len() != 3 {
require.Equal(t, 2, series.Labels().Len())
x := labels.FromStrings("__name__", "grp2_rule1", "name1", "val1")
require.Equal(t, x, series.Labels())
} else {
require.Equal(t, 3, len(series.Labels()))
require.Equal(t, 3, series.Labels().Len())
}
it := series.Iterator()
it := series.Iterator(nil)
for it.Next() == chunkenc.ValFloat {
samplesCount++
ts, v := it.At()


@ -315,7 +315,7 @@ func readPrometheusLabels(r io.Reader, n int) ([]labels.Labels, error) {
i := 0
for scanner.Scan() && i < n {
m := make(labels.Labels, 0, 10)
m := make([]labels.Label, 0, 10)
r := strings.NewReplacer("\"", "", "{", "", "}", "")
s := r.Replace(scanner.Text())
@ -325,13 +325,12 @@ func readPrometheusLabels(r io.Reader, n int) ([]labels.Labels, error) {
split := strings.Split(labelChunk, ":")
m = append(m, labels.Label{Name: split[0], Value: split[1]})
}
// Order of the k/v labels matters, don't assume we'll always receive them already sorted.
sort.Sort(m)
h := m.Hash()
ml := labels.New(m...) // This sorts by name - order of the k/v labels matters, don't assume we'll always receive them already sorted.
h := ml.Hash()
if _, ok := hashes[h]; ok {
continue
}
mets = append(mets, m)
mets = append(mets, ml)
hashes[h] = struct{}{}
i++
}
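`labels.New` copies and sorts its input by name, so the parsed `[]labels.Label` pairs above can be canonicalised in one call and the explicit `sort.Sort` dropped; two inputs that differ only in order hash identically. A small sketch of that property:

```go
package example

import "github.com/prometheus/prometheus/model/labels"

// sameHash returns true: labels.New sorts by name, so input order
// does not affect the resulting hash.
func sameHash() bool {
	a := labels.New(
		labels.Label{Name: "job", Value: "node"},
		labels.Label{Name: "instance", Value: "a"},
	)
	b := labels.New(
		labels.Label{Name: "instance", Value: "a"},
		labels.Label{Name: "job", Value: "node"},
	)
	return a.Hash() == b.Hash()
}
```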
@ -470,21 +469,21 @@ func analyzeBlock(path, blockID string, limit int, runExtended bool) error {
if err != nil {
return err
}
lbls := labels.Labels{}
chks := []chunks.Meta{}
builder := labels.ScratchBuilder{}
for p.Next() {
if err = ir.Series(p.At(), &lbls, &chks); err != nil {
if err = ir.Series(p.At(), &builder, &chks); err != nil {
return err
}
// Amount of the block time range not covered by this series.
uncovered := uint64(meta.MaxTime-meta.MinTime) - uint64(chks[len(chks)-1].MaxTime-chks[0].MinTime)
for _, lbl := range lbls {
builder.Labels().Range(func(lbl labels.Label) {
key := lbl.Name + "=" + lbl.Value
labelsUncovered[lbl.Name] += uncovered
labelpairsUncovered[key] += uncovered
labelpairsCount[key]++
entries++
}
})
}
if p.Err() != nil {
return p.Err()
@ -589,10 +588,10 @@ func analyzeCompaction(block tsdb.BlockReader, indexr tsdb.IndexReader) (err err
nBuckets := 10
histogram := make([]int, nBuckets)
totalChunks := 0
var builder labels.ScratchBuilder
for postingsr.Next() {
lbsl := labels.Labels{}
var chks []chunks.Meta
if err := indexr.Series(postingsr.At(), &lbsl, &chks); err != nil {
if err := indexr.Series(postingsr.At(), &builder, &chks); err != nil {
return err
}
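`IndexReader.Series` now fills a `labels.ScratchBuilder` rather than appending to a `labels.Labels`, so a single builder can be reused across all postings without reallocating. The loop shape, sketched against that interface:

```go
package example

import (
	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/tsdb"
	"github.com/prometheus/prometheus/tsdb/chunks"
	"github.com/prometheus/prometheus/tsdb/index"
)

// countLabelEntries reuses one ScratchBuilder for every series in p.
func countLabelEntries(ir tsdb.IndexReader, p index.Postings) (int, error) {
	var builder labels.ScratchBuilder
	var chks []chunks.Meta
	entries := 0
	for p.Next() {
		if err := ir.Series(p.At(), &builder, &chks); err != nil {
			return 0, err
		}
		builder.Labels().Range(func(labels.Label) { entries++ })
	}
	return entries, p.Err()
}
```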
@ -644,7 +643,7 @@ func dumpSamples(path string, mint, maxt int64) (err error) {
for ss.Next() {
series := ss.At()
lbs := series.Labels()
it := series.Iterator()
it := series.Iterator(nil)
for it.Next() == chunkenc.ValFloat {
ts, val := it.At()
fmt.Printf("%s %g %d\n", lbs, val, ts)


@ -284,8 +284,8 @@ func (tg *testGroup) test(evalInterval time.Duration, groupOrderMap map[string]i
for _, a := range ar.ActiveAlerts() {
if a.State == rules.StateFiring {
alerts = append(alerts, labelAndAnnotation{
Labels: append(labels.Labels{}, a.Labels...),
Annotations: append(labels.Labels{}, a.Annotations...),
Labels: a.Labels.Copy(),
Annotations: a.Annotations.Copy(),
})
}
}


@ -80,7 +80,8 @@ func Load(s string, expandExternalLabels bool, logger log.Logger) (*Config, erro
return cfg, nil
}
for i, v := range cfg.GlobalConfig.ExternalLabels {
b := labels.ScratchBuilder{}
cfg.GlobalConfig.ExternalLabels.Range(func(v labels.Label) {
newV := os.Expand(v.Value, func(s string) string {
if s == "$" {
return "$"
@ -93,10 +94,10 @@ func Load(s string, expandExternalLabels bool, logger log.Logger) (*Config, erro
})
if newV != v.Value {
level.Debug(logger).Log("msg", "External label replaced", "label", v.Name, "input", v.Value, "output", newV)
v.Value = newV
cfg.GlobalConfig.ExternalLabels[i] = v
}
}
b.Add(v.Name, newV)
})
cfg.GlobalConfig.ExternalLabels = b.Labels()
return cfg, nil
}
@ -112,10 +113,6 @@ func LoadFile(filename string, agentMode, expandExternalLabels bool, logger log.
}
if agentMode {
if len(cfg.RemoteWriteConfigs) == 0 {
return nil, errors.New("at least one remote_write target must be specified in agent mode")
}
if len(cfg.AlertingConfig.AlertmanagerConfigs) > 0 || len(cfg.AlertingConfig.AlertRelabelConfigs) > 0 {
return nil, errors.New("field alerting is not allowed in agent mode")
}
@ -361,13 +358,16 @@ func (c *GlobalConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
return err
}
for _, l := range gc.ExternalLabels {
if err := gc.ExternalLabels.Validate(func(l labels.Label) error {
if !model.LabelName(l.Name).IsValid() {
return fmt.Errorf("%q is not a valid label name", l.Name)
}
if !model.LabelValue(l.Value).IsValid() {
return fmt.Errorf("%q is not a valid label value", l.Value)
}
return nil
}); err != nil {
return err
}
// First set the correct scrape interval, then check that the timeout
@ -394,7 +394,7 @@ func (c *GlobalConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
// isZero returns true iff the global config is the zero value.
func (c *GlobalConfig) isZero() bool {
return c.ExternalLabels == nil &&
return c.ExternalLabels.IsEmpty() &&
c.ScrapeInterval == 0 &&
c.ScrapeTimeout == 0 &&
c.EvaluationInterval == 0 &&


@ -586,6 +586,7 @@ var expectedConf = &Config{
Values: []string{"web", "db"},
},
},
HTTPClientConfig: config.DefaultHTTPClientConfig,
},
},
},
@ -602,12 +603,13 @@ var expectedConf = &Config{
ServiceDiscoveryConfigs: discovery.Configs{
&aws.LightsailSDConfig{
Region: "us-east-1",
AccessKey: "access",
SecretKey: "mysecret",
Profile: "profile",
RefreshInterval: model.Duration(60 * time.Second),
Port: 80,
Region: "us-east-1",
AccessKey: "access",
SecretKey: "mysecret",
Profile: "profile",
RefreshInterval: model.Duration(60 * time.Second),
Port: 80,
HTTPClientConfig: config.DefaultHTTPClientConfig,
},
},
},
@ -1743,6 +1745,33 @@ func TestExpandExternalLabels(t *testing.T) {
require.Equal(t, labels.FromStrings("bar", "foo", "baz", "fooTestValuebar", "foo", "TestValue", "qux", "foo${TEST}", "xyz", "foo$bar"), c.GlobalConfig.ExternalLabels)
}
func TestAgentMode(t *testing.T) {
_, err := LoadFile("testdata/agent_mode.with_alert_manager.yml", true, false, log.NewNopLogger())
require.ErrorContains(t, err, "field alerting is not allowed in agent mode")
_, err = LoadFile("testdata/agent_mode.with_alert_relabels.yml", true, false, log.NewNopLogger())
require.ErrorContains(t, err, "field alerting is not allowed in agent mode")
_, err = LoadFile("testdata/agent_mode.with_rule_files.yml", true, false, log.NewNopLogger())
require.ErrorContains(t, err, "field rule_files is not allowed in agent mode")
_, err = LoadFile("testdata/agent_mode.with_remote_reads.yml", true, false, log.NewNopLogger())
require.ErrorContains(t, err, "field remote_read is not allowed in agent mode")
c, err := LoadFile("testdata/agent_mode.without_remote_writes.yml", true, false, log.NewNopLogger())
require.NoError(t, err)
require.Len(t, c.RemoteWriteConfigs, 0)
c, err = LoadFile("testdata/agent_mode.good.yml", true, false, log.NewNopLogger())
require.NoError(t, err)
require.Len(t, c.RemoteWriteConfigs, 1)
require.Equal(
t,
"http://remote1/push",
c.RemoteWriteConfigs[0].URL.String(),
)
}
func TestEmptyGlobalBlock(t *testing.T) {
c, err := Load("global:\n", false, log.NewNopLogger())
require.NoError(t, err)

config/testdata/agent_mode.good.yml

@ -0,0 +1,2 @@
remote_write:
- url: http://remote1/push


@ -0,0 +1,6 @@
alerting:
alertmanagers:
- scheme: https
static_configs:
- targets:
- "1.2.3.4:9093"


@ -0,0 +1,5 @@
alerting:
alert_relabel_configs:
- action: uppercase
source_labels: [instance]
target_label: instance


@ -0,0 +1,5 @@
remote_read:
- url: http://remote1/read
read_recent: true
name: default
enable_http2: false


@ -0,0 +1,3 @@
rule_files:
- "first.rules"
- "my/*.rules"


@ -0,0 +1,2 @@
global:
scrape_interval: 15s


@ -66,8 +66,9 @@ const (
// DefaultEC2SDConfig is the default EC2 SD configuration.
var DefaultEC2SDConfig = EC2SDConfig{
Port: 80,
RefreshInterval: model.Duration(60 * time.Second),
Port: 80,
RefreshInterval: model.Duration(60 * time.Second),
HTTPClientConfig: config.DefaultHTTPClientConfig,
}
func init() {
@ -91,6 +92,8 @@ type EC2SDConfig struct {
RefreshInterval model.Duration `yaml:"refresh_interval,omitempty"`
Port int `yaml:"port"`
Filters []*EC2Filter `yaml:"filters"`
HTTPClientConfig config.HTTPClientConfig `yaml:",inline"`
}
// Name returns the name of the EC2 Config.
@ -171,11 +174,17 @@ func (d *EC2Discovery) ec2Client(ctx context.Context) (*ec2.EC2, error) {
creds = nil
}
client, err := config.NewClientFromConfig(d.cfg.HTTPClientConfig, "ec2_sd")
if err != nil {
return nil, err
}
sess, err := session.NewSessionWithOptions(session.Options{
Config: aws.Config{
Endpoint: &d.cfg.Endpoint,
Region: &d.cfg.Region,
Credentials: creds,
HTTPClient: client,
},
Profile: d.cfg.Profile,
})


@ -56,8 +56,9 @@ const (
// DefaultLightsailSDConfig is the default Lightsail SD configuration.
var DefaultLightsailSDConfig = LightsailSDConfig{
Port: 80,
RefreshInterval: model.Duration(60 * time.Second),
Port: 80,
RefreshInterval: model.Duration(60 * time.Second),
HTTPClientConfig: config.DefaultHTTPClientConfig,
}
func init() {
@ -74,6 +75,8 @@ type LightsailSDConfig struct {
RoleARN string `yaml:"role_arn,omitempty"`
RefreshInterval model.Duration `yaml:"refresh_interval,omitempty"`
Port int `yaml:"port"`
HTTPClientConfig config.HTTPClientConfig `yaml:",inline"`
}
// Name returns the name of the Lightsail Config.
@ -144,11 +147,17 @@ func (d *LightsailDiscovery) lightsailClient() (*lightsail.Lightsail, error) {
creds = nil
}
client, err := config.NewClientFromConfig(d.cfg.HTTPClientConfig, "lightsail_sd")
if err != nil {
return nil, err
}
sess, err := session.NewSessionWithOptions(session.Options{
Config: aws.Config{
Endpoint: &d.cfg.Endpoint,
Region: &d.cfg.Region,
Credentials: creds,
HTTPClient: client,
},
Profile: d.cfg.Profile,
})


@ -55,6 +55,7 @@ const (
azureLabelMachinePublicIP = azureLabel + "machine_public_ip"
azureLabelMachineTag = azureLabel + "machine_tag_"
azureLabelMachineScaleSet = azureLabel + "machine_scale_set"
azureLabelMachineSize = azureLabel + "machine_size"
authMethodOAuth = "OAuth"
authMethodManagedIdentity = "ManagedIdentity"
@ -261,6 +262,7 @@ type virtualMachine struct {
ScaleSet string
Tags map[string]*string
NetworkInterfaces []string
Size string
}
// Create a new azureResource object from an ID string.
@ -343,6 +345,7 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
azureLabelMachineOSType: model.LabelValue(vm.OsType),
azureLabelMachineLocation: model.LabelValue(vm.Location),
azureLabelMachineResourceGroup: model.LabelValue(r.ResourceGroup),
azureLabelMachineSize: model.LabelValue(vm.Size),
}
if vm.ScaleSet != "" {
@ -514,6 +517,7 @@ func mapFromVM(vm compute.VirtualMachine) virtualMachine {
tags := map[string]*string{}
networkInterfaces := []string{}
var computerName string
var size string
if vm.Tags != nil {
tags = vm.Tags
@ -525,10 +529,13 @@ func mapFromVM(vm compute.VirtualMachine) virtualMachine {
}
}
if vm.VirtualMachineProperties != nil &&
vm.VirtualMachineProperties.OsProfile != nil &&
vm.VirtualMachineProperties.OsProfile.ComputerName != nil {
computerName = *(vm.VirtualMachineProperties.OsProfile.ComputerName)
if vm.VirtualMachineProperties != nil {
if vm.VirtualMachineProperties.OsProfile != nil && vm.VirtualMachineProperties.OsProfile.ComputerName != nil {
computerName = *(vm.VirtualMachineProperties.OsProfile.ComputerName)
}
if vm.VirtualMachineProperties.HardwareProfile != nil {
size = string(vm.VirtualMachineProperties.HardwareProfile.VMSize)
}
}
return virtualMachine{
@ -541,6 +548,7 @@ func mapFromVM(vm compute.VirtualMachine) virtualMachine {
ScaleSet: "",
Tags: tags,
NetworkInterfaces: networkInterfaces,
Size: size,
}
}
@ -549,6 +557,7 @@ func mapFromVMScaleSetVM(vm compute.VirtualMachineScaleSetVM, scaleSetName strin
tags := map[string]*string{}
networkInterfaces := []string{}
var computerName string
var size string
if vm.Tags != nil {
tags = vm.Tags
@ -560,8 +569,13 @@ func mapFromVMScaleSetVM(vm compute.VirtualMachineScaleSetVM, scaleSetName strin
}
}
if vm.VirtualMachineScaleSetVMProperties != nil && vm.VirtualMachineScaleSetVMProperties.OsProfile != nil {
computerName = *(vm.VirtualMachineScaleSetVMProperties.OsProfile.ComputerName)
if vm.VirtualMachineScaleSetVMProperties != nil {
if vm.VirtualMachineScaleSetVMProperties.OsProfile != nil && vm.VirtualMachineScaleSetVMProperties.OsProfile.ComputerName != nil {
computerName = *(vm.VirtualMachineScaleSetVMProperties.OsProfile.ComputerName)
}
if vm.VirtualMachineScaleSetVMProperties.HardwareProfile != nil {
size = string(vm.VirtualMachineScaleSetVMProperties.HardwareProfile.VMSize)
}
}
return virtualMachine{
@ -574,6 +588,7 @@ func mapFromVMScaleSetVM(vm compute.VirtualMachineScaleSetVM, scaleSetName strin
ScaleSet: scaleSetName,
Tags: tags,
NetworkInterfaces: networkInterfaces,
Size: size,
}
}


@ -28,6 +28,7 @@ func TestMain(m *testing.M) {
func TestMapFromVMWithEmptyTags(t *testing.T) {
id := "test"
name := "name"
size := "size"
vmType := "type"
location := "westeurope"
computerName := "computer_name"
@ -44,6 +45,9 @@ func TestMapFromVMWithEmptyTags(t *testing.T) {
},
},
NetworkProfile: &networkProfile,
HardwareProfile: &compute.HardwareProfile{
VMSize: compute.VirtualMachineSizeTypes(size),
},
}
testVM := compute.VirtualMachine{
@ -64,6 +68,7 @@ func TestMapFromVMWithEmptyTags(t *testing.T) {
OsType: "Linux",
Tags: map[string]*string{},
NetworkInterfaces: []string{},
Size: size,
}
actualVM := mapFromVM(testVM)
@ -74,6 +79,7 @@ func TestMapFromVMWithEmptyTags(t *testing.T) {
func TestMapFromVMWithTags(t *testing.T) {
id := "test"
name := "name"
size := "size"
vmType := "type"
location := "westeurope"
computerName := "computer_name"
@ -93,6 +99,9 @@ func TestMapFromVMWithTags(t *testing.T) {
},
},
NetworkProfile: &networkProfile,
HardwareProfile: &compute.HardwareProfile{
VMSize: compute.VirtualMachineSizeTypes(size),
},
}
testVM := compute.VirtualMachine{
@ -113,6 +122,7 @@ func TestMapFromVMWithTags(t *testing.T) {
OsType: "Linux",
Tags: tags,
NetworkInterfaces: []string{},
Size: size,
}
actualVM := mapFromVM(testVM)
@ -123,6 +133,7 @@ func TestMapFromVMWithTags(t *testing.T) {
func TestMapFromVMScaleSetVMWithEmptyTags(t *testing.T) {
id := "test"
name := "name"
size := "size"
vmType := "type"
location := "westeurope"
computerName := "computer_name"
@ -139,6 +150,9 @@ func TestMapFromVMScaleSetVMWithEmptyTags(t *testing.T) {
},
},
NetworkProfile: &networkProfile,
HardwareProfile: &compute.HardwareProfile{
VMSize: compute.VirtualMachineSizeTypes(size),
},
}
testVM := compute.VirtualMachineScaleSetVM{
@ -161,6 +175,7 @@ func TestMapFromVMScaleSetVMWithEmptyTags(t *testing.T) {
Tags: map[string]*string{},
NetworkInterfaces: []string{},
ScaleSet: scaleSet,
Size: size,
}
actualVM := mapFromVMScaleSetVM(testVM, scaleSet)
@ -171,6 +186,7 @@ func TestMapFromVMScaleSetVMWithEmptyTags(t *testing.T) {
func TestMapFromVMScaleSetVMWithTags(t *testing.T) {
id := "test"
name := "name"
size := "size"
vmType := "type"
location := "westeurope"
computerName := "computer_name"
@ -190,6 +206,9 @@ func TestMapFromVMScaleSetVMWithTags(t *testing.T) {
},
},
NetworkProfile: &networkProfile,
HardwareProfile: &compute.HardwareProfile{
VMSize: compute.VirtualMachineSizeTypes(size),
},
}
testVM := compute.VirtualMachineScaleSetVM{
@ -212,6 +231,7 @@ func TestMapFromVMScaleSetVMWithTags(t *testing.T) {
Tags: tags,
NetworkInterfaces: []string{},
ScaleSet: scaleSet,
Size: size,
}
actualVM := mapFromVMScaleSetVM(testVM, scaleSet)


@ -72,7 +72,7 @@ func NewEndpoints(l log.Logger, eps cache.SharedIndexInformer, svc, pod, node ca
queue: workqueue.NewNamed("endpoints"),
}
e.endpointsInf.AddEventHandler(cache.ResourceEventHandlerFuncs{
_, err := e.endpointsInf.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: func(o interface{}) {
epAddCount.Inc()
e.enqueue(o)
@ -86,6 +86,9 @@ func NewEndpoints(l log.Logger, eps cache.SharedIndexInformer, svc, pod, node ca
e.enqueue(o)
},
})
if err != nil {
level.Error(l).Log("msg", "Error adding endpoints event handler.", "err", err)
}
serviceUpdate := func(o interface{}) {
svc, err := convertToService(o)
@ -106,7 +109,7 @@ func NewEndpoints(l log.Logger, eps cache.SharedIndexInformer, svc, pod, node ca
level.Error(e.logger).Log("msg", "retrieving endpoints failed", "err", err)
}
}
e.serviceInf.AddEventHandler(cache.ResourceEventHandlerFuncs{
_, err = e.serviceInf.AddEventHandler(cache.ResourceEventHandlerFuncs{
// TODO(fabxc): potentially remove add and delete event handlers. Those should
// be triggered via the endpoint handlers already.
AddFunc: func(o interface{}) {
@ -122,8 +125,11 @@ func NewEndpoints(l log.Logger, eps cache.SharedIndexInformer, svc, pod, node ca
serviceUpdate(o)
},
})
if err != nil {
level.Error(l).Log("msg", "Error adding services event handler.", "err", err)
}
if e.withNodeMetadata {
e.nodeInf.AddEventHandler(cache.ResourceEventHandlerFuncs{
_, err = e.nodeInf.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: func(o interface{}) {
node := o.(*apiv1.Node)
e.enqueueNode(node.Name)
@ -137,6 +143,9 @@ func NewEndpoints(l log.Logger, eps cache.SharedIndexInformer, svc, pod, node ca
e.enqueueNode(node.Name)
},
})
if err != nil {
level.Error(l).Log("msg", "Error adding nodes event handler.", "err", err)
}
}
return e
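These Kubernetes discovery changes track client-go v0.26, where `SharedInformer.AddEventHandler` now returns a registration handle and an error instead of nothing; the discoverers log the error and continue. The same pattern is applied to the endpointslice, ingress, node, pod, and service discoverers below. A condensed sketch:

```go
package example

import (
	"github.com/go-kit/log"
	"github.com/go-kit/log/level"
	"k8s.io/client-go/tools/cache"
)

// addEnqueueHandler registers handlers and logs (rather than propagates)
// registration failures, mirroring the discoverers in this merge.
func addEnqueueHandler(l log.Logger, inf cache.SharedInformer, enqueue func(interface{})) {
	_, err := inf.AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc:    enqueue,
		UpdateFunc: func(_, o interface{}) { enqueue(o) },
		DeleteFunc: enqueue,
	})
	if err != nil {
		level.Error(l).Log("msg", "Error adding event handler.", "err", err)
	}
}
```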


@ -73,7 +73,7 @@ func NewEndpointSlice(l log.Logger, eps cache.SharedIndexInformer, svc, pod, nod
queue: workqueue.NewNamed("endpointSlice"),
}
e.endpointSliceInf.AddEventHandler(cache.ResourceEventHandlerFuncs{
_, err := e.endpointSliceInf.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: func(o interface{}) {
epslAddCount.Inc()
e.enqueue(o)
@ -87,6 +87,9 @@ func NewEndpointSlice(l log.Logger, eps cache.SharedIndexInformer, svc, pod, nod
e.enqueue(o)
},
})
if err != nil {
level.Error(l).Log("msg", "Error adding endpoint slices event handler.", "err", err)
}
serviceUpdate := func(o interface{}) {
svc, err := convertToService(o)
@ -109,7 +112,7 @@ func NewEndpointSlice(l log.Logger, eps cache.SharedIndexInformer, svc, pod, nod
}
}
}
e.serviceInf.AddEventHandler(cache.ResourceEventHandlerFuncs{
_, err = e.serviceInf.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: func(o interface{}) {
svcAddCount.Inc()
serviceUpdate(o)
@ -123,9 +126,12 @@ func NewEndpointSlice(l log.Logger, eps cache.SharedIndexInformer, svc, pod, nod
serviceUpdate(o)
},
})
if err != nil {
level.Error(l).Log("msg", "Error adding services event handler.", "err", err)
}
if e.withNodeMetadata {
e.nodeInf.AddEventHandler(cache.ResourceEventHandlerFuncs{
_, err = e.nodeInf.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: func(o interface{}) {
node := o.(*apiv1.Node)
e.enqueueNode(node.Name)
@ -139,6 +145,9 @@ func NewEndpointSlice(l log.Logger, eps cache.SharedIndexInformer, svc, pod, nod
e.enqueueNode(node.Name)
},
})
if err != nil {
level.Error(l).Log("msg", "Error adding nodes event handler.", "err", err)
}
}
return e


@ -48,7 +48,7 @@ type Ingress struct {
// NewIngress returns a new ingress discovery.
func NewIngress(l log.Logger, inf cache.SharedInformer) *Ingress {
s := &Ingress{logger: l, informer: inf, store: inf.GetStore(), queue: workqueue.NewNamed("ingress")}
s.informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
_, err := s.informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: func(o interface{}) {
ingressAddCount.Inc()
s.enqueue(o)
@ -62,6 +62,9 @@ func NewIngress(l log.Logger, inf cache.SharedInformer) *Ingress {
s.enqueue(o)
},
})
if err != nil {
level.Error(l).Log("msg", "Error adding ingresses event handler.", "err", err)
}
return s
}


@ -382,7 +382,8 @@ func mapSelector(rawSelector []SelectorConfig) roleSelector {
return rs
}
const resyncPeriod = 10 * time.Minute
// Disable the informer's resync, which just periodically resends already processed updates and distorts SD metrics.
const resyncDisabled = 0
// Run implements the discoverer interface.
func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
@ -475,8 +476,8 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
eps := NewEndpointSlice(
log.With(d.logger, "role", "endpointslice"),
informer,
cache.NewSharedInformer(slw, &apiv1.Service{}, resyncPeriod),
cache.NewSharedInformer(plw, &apiv1.Pod{}, resyncPeriod),
cache.NewSharedInformer(slw, &apiv1.Service{}, resyncDisabled),
cache.NewSharedInformer(plw, &apiv1.Pod{}, resyncDisabled),
nodeInf,
)
d.discoverers = append(d.discoverers, eps)
@ -534,8 +535,8 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
eps := NewEndpoints(
log.With(d.logger, "role", "endpoint"),
d.newEndpointsByNodeInformer(elw),
cache.NewSharedInformer(slw, &apiv1.Service{}, resyncPeriod),
cache.NewSharedInformer(plw, &apiv1.Pod{}, resyncPeriod),
cache.NewSharedInformer(slw, &apiv1.Service{}, resyncDisabled),
cache.NewSharedInformer(plw, &apiv1.Pod{}, resyncDisabled),
nodeInf,
)
d.discoverers = append(d.discoverers, eps)
@ -589,7 +590,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
}
svc := NewService(
log.With(d.logger, "role", "service"),
cache.NewSharedInformer(slw, &apiv1.Service{}, resyncPeriod),
cache.NewSharedInformer(slw, &apiv1.Service{}, resyncDisabled),
)
d.discoverers = append(d.discoverers, svc)
go svc.informer.Run(ctx.Done())
@ -627,7 +628,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
return i.Watch(ctx, options)
},
}
informer = cache.NewSharedInformer(ilw, &networkv1.Ingress{}, resyncPeriod)
informer = cache.NewSharedInformer(ilw, &networkv1.Ingress{}, resyncDisabled)
} else {
i := d.client.NetworkingV1beta1().Ingresses(namespace)
ilw := &cache.ListWatch{
@ -642,7 +643,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
return i.Watch(ctx, options)
},
}
informer = cache.NewSharedInformer(ilw, &v1beta1.Ingress{}, resyncPeriod)
informer = cache.NewSharedInformer(ilw, &v1beta1.Ingress{}, resyncDisabled)
}
ingress := NewIngress(
log.With(d.logger, "role", "ingress"),
@ -732,7 +733,7 @@ func (d *Discovery) newNodeInformer(ctx context.Context) cache.SharedInformer {
return d.client.CoreV1().Nodes().Watch(ctx, options)
},
}
return cache.NewSharedInformer(nlw, &apiv1.Node{}, resyncPeriod)
return cache.NewSharedInformer(nlw, &apiv1.Node{}, resyncDisabled)
}
func (d *Discovery) newPodsByNodeInformer(plw *cache.ListWatch) cache.SharedIndexInformer {
@ -747,13 +748,13 @@ func (d *Discovery) newPodsByNodeInformer(plw *cache.ListWatch) cache.SharedInde
}
}
return cache.NewSharedIndexInformer(plw, &apiv1.Pod{}, resyncPeriod, indexers)
return cache.NewSharedIndexInformer(plw, &apiv1.Pod{}, resyncDisabled, indexers)
}
func (d *Discovery) newEndpointsByNodeInformer(plw *cache.ListWatch) cache.SharedIndexInformer {
indexers := make(map[string]cache.IndexFunc)
if !d.attachMetadata.Node {
return cache.NewSharedIndexInformer(plw, &apiv1.Endpoints{}, resyncPeriod, indexers)
return cache.NewSharedIndexInformer(plw, &apiv1.Endpoints{}, resyncDisabled, indexers)
}
indexers[nodeIndex] = func(obj interface{}) ([]string, error) {
@ -773,13 +774,13 @@ func (d *Discovery) newEndpointsByNodeInformer(plw *cache.ListWatch) cache.Share
return nodes, nil
}
return cache.NewSharedIndexInformer(plw, &apiv1.Endpoints{}, resyncPeriod, indexers)
return cache.NewSharedIndexInformer(plw, &apiv1.Endpoints{}, resyncDisabled, indexers)
}
func (d *Discovery) newEndpointSlicesByNodeInformer(plw *cache.ListWatch, object runtime.Object) cache.SharedIndexInformer {
indexers := make(map[string]cache.IndexFunc)
if !d.attachMetadata.Node {
cache.NewSharedIndexInformer(plw, &disv1.EndpointSlice{}, resyncPeriod, indexers)
cache.NewSharedIndexInformer(plw, &disv1.EndpointSlice{}, resyncDisabled, indexers)
}
indexers[nodeIndex] = func(obj interface{}) ([]string, error) {
@ -806,7 +807,7 @@ func (d *Discovery) newEndpointSlicesByNodeInformer(plw *cache.ListWatch, object
return nodes, nil
}
return cache.NewSharedIndexInformer(plw, object, resyncPeriod, indexers)
return cache.NewSharedIndexInformer(plw, object, resyncDisabled, indexers)
}
func checkDiscoveryV1Supported(client kubernetes.Interface) (bool, error) {
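A resync period of 0 disables client-go's periodic resync altogether, which is what the new `resyncDisabled` constant makes explicit; resync only replays objects already in the local cache, so for service discovery it produced duplicate updates and skewed SD metrics. Sketched usage:

```go
package example

import (
	apiv1 "k8s.io/api/core/v1"
	"k8s.io/client-go/tools/cache"
)

// resyncDisabled turns off periodic resync: 0 means "never resync".
const resyncDisabled = 0

func newNodeInformer(nlw cache.ListerWatcher) cache.SharedInformer {
	return cache.NewSharedInformer(nlw, &apiv1.Node{}, resyncDisabled)
}
```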


@ -55,7 +55,7 @@ func NewNode(l log.Logger, inf cache.SharedInformer) *Node {
l = log.NewNopLogger()
}
n := &Node{logger: l, informer: inf, store: inf.GetStore(), queue: workqueue.NewNamed("node")}
n.informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
_, err := n.informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: func(o interface{}) {
nodeAddCount.Inc()
n.enqueue(o)
@ -69,6 +69,9 @@ func NewNode(l log.Logger, inf cache.SharedInformer) *Node {
n.enqueue(o)
},
})
if err != nil {
level.Error(l).Log("msg", "Error adding nodes event handler.", "err", err)
}
return n
}


@ -65,7 +65,7 @@ func NewPod(l log.Logger, pods cache.SharedIndexInformer, nodes cache.SharedInfo
logger: l,
queue: workqueue.NewNamed("pod"),
}
p.podInf.AddEventHandler(cache.ResourceEventHandlerFuncs{
_, err := p.podInf.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: func(o interface{}) {
podAddCount.Inc()
p.enqueue(o)
@ -79,9 +79,12 @@ func NewPod(l log.Logger, pods cache.SharedIndexInformer, nodes cache.SharedInfo
p.enqueue(o)
},
})
if err != nil {
level.Error(l).Log("msg", "Error adding pods event handler.", "err", err)
}
if p.withNodeMetadata {
p.nodeInf.AddEventHandler(cache.ResourceEventHandlerFuncs{
_, err = p.nodeInf.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: func(o interface{}) {
node := o.(*apiv1.Node)
p.enqueuePodsForNode(node.Name)
@ -95,6 +98,9 @@ func NewPod(l log.Logger, pods cache.SharedIndexInformer, nodes cache.SharedInfo
p.enqueuePodsForNode(node.Name)
},
})
if err != nil {
level.Error(l).Log("msg", "Error adding pods event handler.", "err", err)
}
}
return p


@ -51,7 +51,7 @@ func NewService(l log.Logger, inf cache.SharedInformer) *Service {
l = log.NewNopLogger()
}
s := &Service{logger: l, informer: inf, store: inf.GetStore(), queue: workqueue.NewNamed("service")}
s.informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
_, err := s.informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: func(o interface{}) {
svcAddCount.Inc()
s.enqueue(o)
@ -65,6 +65,9 @@ func NewService(l log.Logger, inf cache.SharedInformer) *Service {
s.enqueue(o)
},
})
if err != nil {
level.Error(l).Log("msg", "Error adding services event handler.", "err", err)
}
return s
}


@ -208,6 +208,10 @@ tls_config:
# Optional proxy URL.
[ proxy_url: <string> ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_headers:
[ <string>: [<secret>, ...] ] ]
# List of Azure service discovery configurations.
azure_sd_configs:
@ -389,6 +393,11 @@ A `tls_config` allows configuring TLS connections.
# If unset, Prometheus will use Go default minimum version, which is TLS 1.2.
# See MinVersion in https://pkg.go.dev/crypto/tls#Config.
[ min_version: <string> ]
# Maximum acceptable TLS version. Accepted values: TLS10 (TLS 1.0), TLS11 (TLS
# 1.1), TLS12 (TLS 1.2), TLS13 (TLS 1.3).
# If unset, Prometheus will use Go default maximum version, which is TLS 1.3.
# See MaxVersion in https://pkg.go.dev/crypto/tls#Config.
[ max_version: <string> ]
```
### `<oauth2>`
@ -422,6 +431,9 @@ tls_config:
# Optional proxy URL.
[ proxy_url: <string> ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_headers:
[ <string>: [<secret>, ...] ] ]
```
### `<azure_sd_config>`
@ -440,6 +452,7 @@ The following meta labels are available on targets during [relabeling](#relabel_
* `__meta_azure_machine_resource_group`: the machine's resource group
* `__meta_azure_machine_tag_<tagname>`: each tag value of the machine
* `__meta_azure_machine_scale_set`: the name of the scale set which the vm is part of (this value is only set if you are using a [scale set](https://docs.microsoft.com/en-us/azure/virtual-machine-scale-sets/))
* `__meta_azure_machine_size`: the machine size
* `__meta_azure_subscription_id`: the subscription ID
* `__meta_azure_tenant_id`: the tenant ID
@ -500,6 +513,9 @@ oauth2:
# Optional proxy URL.
[ proxy_url: <string> ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_headers:
[ <string>: [<secret>, ...] ] ]
# Configure whether HTTP requests follow HTTP 3xx redirects.
[ follow_redirects: <boolean> | default = true ]
@ -602,6 +618,9 @@ oauth2:
# Optional proxy URL.
[ proxy_url: <string> ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_headers:
[ <string>: [<secret>, ...] ] ]
# Configure whether HTTP requests follow HTTP 3xx redirects.
[ follow_redirects: <boolean> | default = true ]
@ -680,6 +699,9 @@ oauth2:
# Optional proxy URL.
[ proxy_url: <string> ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_headers:
[ <string>: [<secret>, ...] ] ]
# Configure whether HTTP requests follow HTTP 3xx redirects.
[ follow_redirects: <boolean> | default = true ]
@ -729,6 +751,9 @@ host: <string>
# Optional proxy URL.
[ proxy_url: <string> ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_headers:
[ <string>: [<secret>, ...] ] ]
# TLS configuration.
tls_config:
@ -895,6 +920,9 @@ host: <string>
# Optional proxy URL.
[ proxy_url: <string> ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_headers:
[ <string>: [<secret>, ...] ] ]
# TLS configuration.
tls_config:
@ -1064,6 +1092,48 @@ See below for the configuration options for EC2 discovery:
filters:
[ - name: <string>
values: <string>, [...] ]
# Authentication information used to authenticate to the EC2 API.
# Note that `basic_auth`, `authorization` and `oauth2` options are
# mutually exclusive.
# `password` and `password_file` are mutually exclusive.
# Optional HTTP basic authentication information, currently not supported by AWS.
basic_auth:
[ username: <string> ]
[ password: <secret> ]
[ password_file: <string> ]
# Optional `Authorization` header configuration, currently not supported by AWS.
authorization:
# Sets the authentication type.
[ type: <string> | default: Bearer ]
# Sets the credentials. It is mutually exclusive with
# `credentials_file`.
[ credentials: <secret> ]
# Sets the credentials to the credentials read from the configured file.
# It is mutually exclusive with `credentials`.
[ credentials_file: <filename> ]
# Optional OAuth 2.0 configuration, currently not supported by AWS.
oauth2:
[ <oauth2> ]
# Optional proxy URL.
[ proxy_url: <string> ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_headers:
[ <string>: [<secret>, ...] ] ]
# Configure whether HTTP requests follow HTTP 3xx redirects.
[ follow_redirects: <boolean> | default = true ]
# Whether to enable HTTP2.
[ enable_http2: <bool> | default: true ]
# TLS configuration.
tls_config:
[ <tls_config> ]
```
The [relabeling phase](#relabel_config) is the preferred and more powerful
@ -1320,6 +1390,9 @@ oauth2:
# Optional proxy URL.
[ proxy_url: <string> ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_headers:
[ <string>: [<secret>, ...] ] ]
# Configure whether HTTP requests follow HTTP 3xx redirects.
[ follow_redirects: <boolean> | default = true ]
@ -1528,6 +1601,9 @@ oauth2:
# Optional proxy URL.
[ proxy_url: <string> ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_headers:
[ <string>: [<secret>, ...] ] ]
# Configure whether HTTP requests follow HTTP 3xx redirects.
[ follow_redirects: <boolean> | default = true ]
@ -1613,6 +1689,9 @@ oauth2:
# Optional proxy URL.
[ proxy_url: <string> ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_headers:
[ <string>: [<secret>, ...] ] ]
# Configure whether HTTP requests follow HTTP 3xx redirects.
[ follow_redirects: <boolean> | default = true ]
@ -1687,6 +1766,9 @@ oauth2:
# Optional proxy URL.
[ proxy_url: <string> ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_headers:
[ <string>: [<secret>, ...] ] ]
# Configure whether HTTP requests follow HTTP 3xx redirects.
[ follow_redirects: <boolean> | default = true ]
@ -1900,6 +1982,9 @@ oauth2:
# Optional proxy URL.
[ proxy_url: <string> ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_headers:
[ <string>: [<secret>, ...] ] ]
# Configure whether HTTP requests follow HTTP 3xx redirects.
[ follow_redirects: <boolean> | default = true ]
@ -1977,6 +2062,9 @@ server: <string>
# Optional proxy URL.
[ proxy_url: <string> ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_headers:
[ <string>: [<secret>, ...] ] ]
# TLS configuration.
tls_config:
@ -2066,6 +2154,48 @@ See below for the configuration options for Lightsail discovery:
# The port to scrape metrics from. If using the public IP address, this must
# instead be specified in the relabeling rule.
[ port: <int> | default = 80 ]
# Authentication information used to authenticate to the Lightsail API.
# Note that `basic_auth`, `authorization` and `oauth2` options are
# mutually exclusive.
# `password` and `password_file` are mutually exclusive.
# Optional HTTP basic authentication information, currently not supported by AWS.
basic_auth:
[ username: <string> ]
[ password: <secret> ]
[ password_file: <string> ]
# Optional `Authorization` header configuration, currently not supported by AWS.
authorization:
# Sets the authentication type.
[ type: <string> | default: Bearer ]
# Sets the credentials. It is mutually exclusive with
# `credentials_file`.
[ credentials: <secret> ]
# Sets the credentials to the credentials read from the configured file.
# It is mutually exclusive with `credentials`.
[ credentials_file: <filename> ]
# Optional OAuth 2.0 configuration, currently not supported by AWS.
oauth2:
[ <oauth2> ]
# Optional proxy URL.
[ proxy_url: <string> ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_headers:
[ <string>: [<secret>, ...] ] ]
# Configure whether HTTP requests follow HTTP 3xx redirects.
[ follow_redirects: <boolean> | default = true ]
# Whether to enable HTTP2.
[ enable_http2: <bool> | default: true ]
# TLS configuration.
tls_config:
[ <tls_config> ]
```
### `<linode_sd_config>`
@ -2128,6 +2258,9 @@ oauth2:
# Optional proxy URL.
[ proxy_url: <string> ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_headers:
[ <string>: [<secret>, ...] ] ]
# Configure whether HTTP requests follow HTTP 3xx redirects.
[ follow_redirects: <boolean> | default = true ]
@ -2227,6 +2360,9 @@ tls_config:
# Optional proxy URL.
[ proxy_url: <string> ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_headers:
[ <string>: [<secret>, ...] ] ]
```
By default every app listed in Marathon will be scraped by Prometheus. If not all
@ -2317,6 +2453,9 @@ oauth2:
# Optional proxy URL.
[ proxy_url: <string> ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_headers:
[ <string>: [<secret>, ...] ] ]
# Configure whether HTTP requests follow HTTP 3xx redirects.
[ follow_redirects: <boolean> | default = true ]
@ -2494,6 +2633,9 @@ tls_config:
# Optional proxy URL.
[ proxy_url: <string> ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_headers:
[ <string>: [<secret>, ...] ] ]
# Configure whether HTTP requests follow HTTP 3xx redirects.
[ follow_redirects: <boolean> | default = true ]
@ -2610,6 +2752,9 @@ tags_filter:
# Optional proxy URL.
[ proxy_url: <string> ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_headers:
[ <string>: [<secret>, ...] ] ]
# TLS configuration.
tls_config:
@ -2677,6 +2822,9 @@ oauth2:
# Optional proxy URL.
[ proxy_url: <string> ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_headers:
[ <string>: [<secret>, ...] ] ]
# Configure whether HTTP requests follow HTTP 3xx redirects.
[ follow_redirects: <boolean> | default = true ]
@ -2750,6 +2898,9 @@ oauth2:
# Optional proxy URL.
[ proxy_url: <string> ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_headers:
[ <string>: [<secret>, ...] ] ]
# Configure whether HTTP requests follow HTTP 3xx redirects.
[ follow_redirects: <boolean> | default = true ]
@ -2943,6 +3094,9 @@ tls_config:
# Optional proxy URL.
[ proxy_url: <string> ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_headers:
[ <string>: [<secret>, ...] ] ]
# Configure whether HTTP requests follow HTTP 3xx redirects.
[ follow_redirects: <boolean> | default = true ]
@ -3152,6 +3306,9 @@ tls_config:
# Optional proxy URL.
[ proxy_url: <string> ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_headers:
[ <string>: [<secret>, ...] ] ]
# Configure whether HTTP requests follow HTTP 3xx redirects.
[ follow_redirects: <boolean> | default = true ]
@ -3256,6 +3413,9 @@ tls_config:
# Optional proxy URL.
[ proxy_url: <string> ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_headers:
[ <string>: [<secret>, ...] ] ]
# Configure whether HTTP requests follow HTTP 3xx redirects.
[ follow_redirects: <boolean> | default = true ]


@ -132,6 +132,9 @@ annotations:
[ <labelname>: <tmpl_string> ]
```
See also the
[best practices for naming metrics created by recording rules](https://prometheus.io/docs/practices/rules/#recording-rules).
# Limiting alerts and series
A limit for alerts produced by alerting rules and series produced by recording rules


@ -77,14 +77,17 @@ series: <string>
# This uses expanding notation.
# Expanding notation:
# 'a+bxc' becomes 'a a+b a+(2*b) a+(3*b) … a+(c*b)'
# Read this as series starts at a, then c further samples incrementing by b.
# 'a-bxc' becomes 'a a-b a-(2*b) a-(3*b) … a-(c*b)'
# Read this as series starts at a, then c further samples decrementing by b (or incrementing by negative b).
# There are special values to indicate missing and stale samples:
# '_' represents a missing sample from scrape
# 'stale' indicates a stale sample
# Examples:
# 1. '-2+4x3' becomes '-2 2 6 10'
# 2. ' 1-2x4' becomes '1 -1 -3 -5 -7'
# 3. ' 1 _x3 stale' becomes '1 _ _ _ stale'
# 1. '-2+4x3' becomes '-2 2 6 10' - series starts at -2, then 3 further samples incrementing by 4.
# 2. ' 1-2x4' becomes '1 -1 -3 -5 -7' - series starts at 1, then 4 further samples decrementing by 2.
# 3. ' 1x4' becomes '1 1 1 1 1' - shorthand for '1+0x4', series starts at 1, then 4 further samples incrementing by 0.
# 4. ' 1 _x3 stale' becomes '1 _ _ _ stale' - the missing sample cannot increment, so 3 missing samples are produced by the '_x3' expression.
values: <string>
```


@ -623,6 +623,38 @@ $ curl 'http://localhost:9090/api/v1/targets?state=active'
}
```
The `scrapePool` query parameter allows the caller to filter by scrape pool name.
```json
$ curl 'http://localhost:9090/api/v1/targets?scrapePool=node_exporter'
{
"status": "success",
"data": {
"activeTargets": [
{
"discoveredLabels": {
"__address__": "127.0.0.1:9091",
"__metrics_path__": "/metrics",
"__scheme__": "http",
"job": "node_exporter"
},
"labels": {
"instance": "127.0.0.1:9091",
"job": "node_exporter"
},
"scrapePool": "node_exporter",
"scrapeUrl": "http://127.0.0.1:9091/metrics",
"globalUrl": "http://example-prometheus:9091/metrics",
"lastError": "",
"lastScrape": "2017-01-17T15:07:44.723715405+01:00",
"lastScrapeDuration": 50688943,
"health": "up"
}
],
"droppedTargets": []
}
}
```
## Rules


@ -5,7 +5,7 @@ sort_rank: 7
# Remote Read API
This is not currently considered par of the stable API and is subject to change
This is not currently considered part of the stable API and is subject to change
even between non-major version releases of Prometheus.
## Format overview
@ -65,8 +65,6 @@ Note: Names of query parameters that may be repeated end with `[]`.
This API provides data read functionality from Prometheus. This interface expects [snappy](https://github.com/google/snappy) compression.
The API definition is located [here](https://github.com/prometheus/prometheus/blob/master/prompb/remote.proto).
/// Can you clarify what you mean by this?
/// https://github.com/prometheus/prometheus/pull/7266#discussion_r426456791 Can we talk a little bit how negotiation works of sampled vs streamed ?
Requests are made to the following endpoint.
```
@ -74,12 +72,10 @@ Request are made to the following endpoint.
```
### Samples
/// Does it return a message that includes a list, or does it return a list of raw samples?
This returns a message that includes a list of raw samples.
### Streamed Chunks
/// This is a little much detail, the relevant point is they're the internal implementation of the chunks.
These streamed chunks utilize an XOR algorithm inspired by the [Gorilla](http://www.vldb.org/pvldb/vol8/p1816-teller.pdf)
compression to encode the chunks. However, it provides resolution to the millisecond instead of to the second.


@ -21,6 +21,7 @@ require (
github.com/cespare/xxhash/v2 v2.1.2 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/dennwc/varint v1.0.0 // indirect
github.com/docker/distribution v2.8.1+incompatible // indirect
github.com/felixge/httpsnoop v1.0.3 // indirect
github.com/go-logfmt/logfmt v0.5.1 // indirect
github.com/go-logr/logr v1.2.3 // indirect


@ -38,7 +38,8 @@ github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs
github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE=
github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA=
github.com/digitalocean/godo v1.82.0 h1:lqAit46H1CqJGjh7LDbsamng/UMBME5rvmfH3Vb5Yy8=
github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug=
github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68=
github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/docker v20.10.17+incompatible h1:JYCuMrWaVNophQTOrMMoSwudOVEfcegoZZrleKc1xwE=
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw=

go.mod

@ -7,15 +7,15 @@ require (
github.com/Azure/go-autorest/autorest v0.11.28
github.com/Azure/go-autorest/autorest/adal v0.9.21
github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137
github.com/aws/aws-sdk-go v1.44.131
github.com/cespare/xxhash/v2 v2.1.2
github.com/aws/aws-sdk-go v1.44.159
github.com/cespare/xxhash/v2 v2.2.0
github.com/dennwc/varint v1.0.0
github.com/dgryski/go-sip13 v0.0.0-20200911182023-62edffca9245
github.com/digitalocean/godo v1.89.0
github.com/digitalocean/godo v1.91.1
github.com/docker/docker v20.10.21+incompatible
github.com/edsrzf/mmap-go v1.1.0
github.com/envoyproxy/go-control-plane v0.10.3
github.com/envoyproxy/protoc-gen-validate v0.8.0
github.com/envoyproxy/protoc-gen-validate v0.9.1
github.com/fsnotify/fsnotify v1.6.0
github.com/go-kit/log v0.2.1
github.com/go-logfmt/logfmt v0.5.1
@ -23,13 +23,13 @@ require (
github.com/go-zookeeper/zk v1.0.3
github.com/gogo/protobuf v1.3.2
github.com/golang/snappy v0.0.4
github.com/google/pprof v0.0.0-20221102093814-76f304f74e5e
github.com/gophercloud/gophercloud v1.0.0
github.com/grafana/regexp v0.0.0-20221005093135-b4c2bcb0a4b6
github.com/google/pprof v0.0.0-20221212185716-aee1124e3a93
github.com/gophercloud/gophercloud v1.1.1
github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd
github.com/grpc-ecosystem/grpc-gateway v1.16.0
github.com/hashicorp/consul/api v1.15.3
github.com/hashicorp/nomad/api v0.0.0-20221102143410-8a95f1239005
github.com/hetznercloud/hcloud-go v1.35.3
github.com/hashicorp/consul/api v1.18.0
github.com/hashicorp/nomad/api v0.0.0-20221214074818-7dbbf6bc584d
github.com/hetznercloud/hcloud-go v1.39.0
github.com/ionos-cloud/sdk-go/v6 v6.1.3
github.com/json-iterator/go v1.1.12
github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b
@ -41,53 +41,53 @@ require (
github.com/ovh/go-ovh v1.3.0
github.com/pkg/errors v0.9.1
github.com/prometheus/alertmanager v0.24.0
github.com/prometheus/client_golang v1.13.1
github.com/prometheus/client_golang v1.14.0
github.com/prometheus/client_model v0.3.0
github.com/prometheus/common v0.37.0
github.com/prometheus/common v0.39.0
github.com/prometheus/common/assets v0.2.0
github.com/prometheus/common/sigv4 v0.1.0
github.com/prometheus/exporter-toolkit v0.8.1
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.9
github.com/prometheus/exporter-toolkit v0.8.2
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.10
github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749
github.com/stretchr/testify v1.8.1
github.com/vultr/govultr/v2 v2.17.2
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.36.4
go.opentelemetry.io/otel v1.11.1
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.11.1
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.11.1
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.11.1
go.opentelemetry.io/otel/sdk v1.11.1
go.opentelemetry.io/otel/trace v1.11.1
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.37.0
go.opentelemetry.io/otel v1.11.2
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.11.2
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.11.2
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.11.2
go.opentelemetry.io/otel/sdk v1.11.2
go.opentelemetry.io/otel/trace v1.11.2
go.uber.org/atomic v1.10.0
go.uber.org/automaxprocs v1.5.1
go.uber.org/goleak v1.2.0
golang.org/x/net v0.1.0
golang.org/x/oauth2 v0.1.0
golang.org/x/net v0.4.0
golang.org/x/oauth2 v0.3.0
golang.org/x/sync v0.1.0
golang.org/x/sys v0.1.0
golang.org/x/time v0.1.0
golang.org/x/tools v0.2.0
google.golang.org/api v0.102.0
google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c
google.golang.org/grpc v1.50.1
golang.org/x/sys v0.3.0
golang.org/x/time v0.3.0
golang.org/x/tools v0.4.0
google.golang.org/api v0.104.0
google.golang.org/genproto v0.0.0-20221207170731-23e4bf6bdc37
google.golang.org/grpc v1.51.0
google.golang.org/protobuf v1.28.1
gopkg.in/alecthomas/kingpin.v2 v2.2.6
gopkg.in/yaml.v2 v2.4.0
gopkg.in/yaml.v3 v3.0.1
k8s.io/api v0.25.3
k8s.io/apimachinery v0.25.3
k8s.io/client-go v0.25.3
k8s.io/api v0.26.0
k8s.io/apimachinery v0.26.0
k8s.io/client-go v0.26.0
k8s.io/klog v1.0.0
k8s.io/klog/v2 v2.80.0
k8s.io/klog/v2 v2.80.1
)
require (
cloud.google.com/go/compute/metadata v0.2.1 // indirect
cloud.google.com/go/compute/metadata v0.2.2 // indirect
github.com/coreos/go-systemd/v22 v22.4.0 // indirect
)
require (
cloud.google.com/go/compute v1.12.1 // indirect
cloud.google.com/go/compute v1.13.0 // indirect
github.com/Azure/go-autorest v14.2.0+incompatible // indirect
github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect
github.com/Azure/go-autorest/autorest/to v0.4.0 // indirect
@ -95,30 +95,28 @@ require (
github.com/Azure/go-autorest/logger v0.2.1 // indirect
github.com/Azure/go-autorest/tracing v0.6.0 // indirect
github.com/Microsoft/go-winio v0.5.1 // indirect
github.com/PuerkitoBio/purell v1.1.1 // indirect
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 // indirect
github.com/armon/go-metrics v0.3.10 // indirect
github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/cenkalti/backoff/v4 v4.1.3 // indirect
github.com/cenkalti/backoff/v4 v4.2.0 // indirect
github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/docker/distribution v2.7.1+incompatible // indirect
github.com/docker/distribution v2.8.1+incompatible // indirect
github.com/docker/go-connections v0.4.0 // indirect
github.com/docker/go-units v0.5.0 // indirect
github.com/emicklei/go-restful/v3 v3.8.0 // indirect
github.com/emicklei/go-restful/v3 v3.9.0 // indirect
github.com/evanphx/json-patch v4.12.0+incompatible // indirect
github.com/fatih/color v1.13.0 // indirect
github.com/felixge/httpsnoop v1.0.3 // indirect
github.com/ghodss/yaml v1.0.0 // indirect
github.com/go-kit/kit v0.10.0 // indirect
github.com/go-kit/kit v0.12.0 // indirect
github.com/go-logr/logr v1.2.3 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-openapi/analysis v0.21.2 // indirect
github.com/go-openapi/errors v0.20.2 // indirect
github.com/go-openapi/jsonpointer v0.19.5 // indirect
github.com/go-openapi/jsonreference v0.19.6 // indirect
github.com/go-openapi/jsonreference v0.20.0 // indirect
github.com/go-openapi/loads v0.21.1 // indirect
github.com/go-openapi/spec v0.20.4 // indirect
github.com/go-openapi/swag v0.21.1 // indirect
@@ -134,17 +132,17 @@ require (
github.com/google/gofuzz v1.2.0 // indirect
github.com/google/uuid v1.3.0 // indirect
github.com/googleapis/enterprise-certificate-proxy v0.2.0 // indirect
github.com/googleapis/gax-go/v2 v2.6.0 // indirect
github.com/googleapis/gax-go/v2 v2.7.0 // indirect
github.com/gorilla/websocket v1.5.0 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.1 // indirect
github.com/hashicorp/cronexpr v1.1.1 // indirect
github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
github.com/hashicorp/go-hclog v0.14.1 // indirect
github.com/hashicorp/go-immutable-radix v1.3.0 // indirect
github.com/hashicorp/go-hclog v0.16.2 // indirect
github.com/hashicorp/go-immutable-radix v1.3.1 // indirect
github.com/hashicorp/go-retryablehttp v0.7.1 // indirect
github.com/hashicorp/go-rootcerts v1.0.2 // indirect
github.com/hashicorp/golang-lru v0.5.4 // indirect
github.com/hashicorp/serf v0.9.7 // indirect
github.com/hashicorp/serf v0.10.1 // indirect
github.com/imdario/mergo v0.3.12 // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
@@ -153,7 +151,7 @@ require (
github.com/mailru/easyjson v0.7.7 // indirect
github.com/mattn/go-colorable v0.1.12 // indirect
github.com/mattn/go-isatty v0.0.14 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 // indirect
@@ -168,23 +166,22 @@ require (
github.com/sirupsen/logrus v1.8.1 // indirect
github.com/spf13/pflag v1.0.5 // indirect
go.mongodb.org/mongo-driver v1.10.2 // indirect
go.opencensus.io v0.23.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.11.1 // indirect
go.opentelemetry.io/otel/metric v0.33.0 // indirect
go.opencensus.io v0.24.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.11.2 // indirect
go.opentelemetry.io/otel/metric v0.34.0 // indirect
go.opentelemetry.io/proto/otlp v0.19.0 // indirect
golang.org/x/crypto v0.1.0 // indirect
golang.org/x/exp v0.0.0-20221031165847-c99f073a8326
golang.org/x/mod v0.6.0 // indirect
golang.org/x/term v0.1.0 // indirect
golang.org/x/text v0.4.0 // indirect
golang.org/x/exp v0.0.0-20221212164502-fae10dda9338
golang.org/x/mod v0.7.0 // indirect
golang.org/x/term v0.3.0 // indirect
golang.org/x/text v0.5.0 // indirect
google.golang.org/appengine v1.6.7 // indirect
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/ini.v1 v1.66.6 // indirect
gotest.tools/v3 v3.0.3 // indirect
k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1 // indirect
k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed // indirect
sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect
k8s.io/kube-openapi v0.0.0-20221207184640-f3cff1453715 // indirect
k8s.io/utils v0.0.0-20221128185143-99ec85e7a448 // indirect
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
sigs.k8s.io/yaml v1.3.0 // indirect
)

go.sum
View file

@@ -3,7 +3,6 @@ cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
@@ -13,9 +12,6 @@ cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bP
cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI=
cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk=
cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY=
cloud.google.com/go v0.105.0 h1:DNtEKRBAAzeS4KyIory52wWHuClNaXJ5x1F7xa4q+5Y=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
@@ -23,13 +19,13 @@ cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvf
cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
cloud.google.com/go/compute v1.12.1 h1:gKVJMEyqV5c/UnpzjjQbo3Rjvvqpr9B1DFSbJC4OXr0=
cloud.google.com/go/compute v1.12.1/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU=
cloud.google.com/go/compute/metadata v0.2.1 h1:efOwf5ymceDhK6PKMnnrTHP4pppY5L22mle96M1yP48=
cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM=
cloud.google.com/go/compute v1.13.0 h1:AYrLkB8NPdDRslNp4Jxmzrhdr03fUAIDbiGFjLWowoU=
cloud.google.com/go/compute v1.13.0/go.mod h1:5aPTS0cUNMIc1CE546K+Th6weJUNQErARyZtRXDJ8GE=
cloud.google.com/go/compute/metadata v0.2.2 h1:aWKAjYaBaOSrpKl57+jnS/3fJRQnxL7TvR/u1VVbt6k=
cloud.google.com/go/compute/metadata v0.2.2/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
cloud.google.com/go/longrunning v0.1.1 h1:y50CXG4j0+qvEukslYFBCrzaXX0qpFbBzc3PchSu/LE=
cloud.google.com/go/longrunning v0.3.0 h1:NjljC+FYPV3uh5/OwWT6pVU+doBqMg2x/rZlE+CamDs=
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
@@ -39,7 +35,6 @@ cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0Zeo
cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/Azure/azure-sdk-for-go v65.0.0+incompatible h1:HzKLt3kIwMm4KeJYTdx9EbjRYTySD/t8i1Ee/W5EGXw=
github.com/Azure/azure-sdk-for-go v65.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
@@ -72,9 +67,7 @@ github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go
github.com/Microsoft/go-winio v0.5.1 h1:aPJp2QD7OOrhO5tQXqQoGSJc+DjDtWTGLOmNyAm6FgY=
github.com/Microsoft/go-winio v0.5.1/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI=
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M=
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
@@ -105,8 +98,8 @@ github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQ
github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
github.com/aws/aws-sdk-go v1.43.11/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
github.com/aws/aws-sdk-go v1.44.131 h1:kd61x79ax0vyiC/SZ9X1hKh8E0pt1BUOOcVBJEFhxkg=
github.com/aws/aws-sdk-go v1.44.131/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
github.com/aws/aws-sdk-go v1.44.159 h1:9odtuHAYQE9tQKyuX6ny1U1MHeH5/yzeCJi96g9H4DU=
github.com/aws/aws-sdk-go v1.44.159/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
@@ -116,14 +109,15 @@ github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kB
github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ=
github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4=
github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
github.com/cenkalti/backoff/v4 v4.2.0 h1:HN5dHm3WBOgndBH6E8V0q2jIYIR3s9yglV8k/+MN3u4=
github.com/cenkalti/backoff/v4 v4.2.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
@@ -132,7 +126,6 @@ github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp
github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
@@ -160,11 +153,11 @@ github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgz
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgryski/go-sip13 v0.0.0-20200911182023-62edffca9245 h1:9cOfvEwjQxdwKuNDTQSaMKNRvwKwgZG+U4HrjeRKHso=
github.com/dgryski/go-sip13 v0.0.0-20200911182023-62edffca9245/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/digitalocean/godo v1.89.0 h1:UL3Ii4qfk86m4qEKg2iSwop0puvgOCKvwzXvwArU05E=
github.com/digitalocean/godo v1.89.0/go.mod h1:NRpFznZFvhHjBoqZAaOD3khVzsJ3EibzKqFL4R60dmA=
github.com/digitalocean/godo v1.91.1 h1:1o30VOCu1aC6488qBd0SkQiBeAZ35RSTvLwCA1pQMhc=
github.com/digitalocean/godo v1.91.1/go.mod h1:NRpFznZFvhHjBoqZAaOD3khVzsJ3EibzKqFL4R60dmA=
github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=
github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug=
github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68=
github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/docker v20.10.21+incompatible h1:UTLdBmHk3bEY+w8qeO5KttOhy6OmXWsl/FEet9Uswog=
github.com/docker/docker v20.10.21+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
@@ -180,13 +173,12 @@ github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFP
github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
github.com/edsrzf/mmap-go v1.1.0 h1:6EUwBLQ/Mcr1EYLE4Tn1VdW1A4ckqCQWZBw8Hr0kjpQ=
github.com/edsrzf/mmap-go v1.1.0/go.mod h1:19H/e8pUPLicwkyNgOykDXkJ9F0MHE+Z52B8EIth78Q=
github.com/emicklei/go-restful/v3 v3.8.0 h1:eCZ8ulSerjdAiaNpF7GxXIE7ZCMo1moN1qX+S609eVw=
github.com/emicklei/go-restful/v3 v3.8.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/emicklei/go-restful/v3 v3.9.0 h1:XwGDlfxEnQZzuopoqxwSEllNcCOM9DhhFyhFIIGKwxE=
github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
@@ -194,8 +186,8 @@ github.com/envoyproxy/go-control-plane v0.10.3 h1:xdCVXxEe0Y3FQith+0cj2irwZudqGY
github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/envoyproxy/protoc-gen-validate v0.6.7/go.mod h1:dyJXwwfPK2VSqiB9Klm1J6romD608Ba7Hij42vrOBCo=
github.com/envoyproxy/protoc-gen-validate v0.8.0 h1:eZxAlfY5c/HTcV7aN9EUL3Ej/zY/WDmawwClR16nfDA=
github.com/envoyproxy/protoc-gen-validate v0.8.0/go.mod h1:z+FSjkCuAJYqUS2daO/NBFgbCao8JDHcYcpnFfD00cI=
github.com/envoyproxy/protoc-gen-validate v0.9.1 h1:PS7VIOgmSVhWUEeZwTe7z7zouA22Cr590PzXKbZHOVY=
github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w=
github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84=
github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
@@ -217,8 +209,9 @@ github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.10.0 h1:dXFJfIHVvUcpSgDOV+Ne6t7jXri8Tfv2uOLHUZ2XNuo=
github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o=
github.com/go-kit/kit v0.12.0 h1:e4o3o3IsBfAKQh5Qbbiqyfu97Ku7jrO/JbohvztANh4=
github.com/go-kit/kit v0.12.0/go.mod h1:lHd+EkCZPIwYItmGDDRdhinkzX2A1sj+M9biaEaizzs=
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0=
github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU=
@@ -242,8 +235,9 @@ github.com/go-openapi/errors v0.20.2/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpX
github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY=
github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
github.com/go-openapi/jsonreference v0.19.6 h1:UBIxjkht+AWIgYzCDSv2GN+E/togfwXUJFRTWhl2Jjs=
github.com/go-openapi/jsonreference v0.19.6/go.mod h1:diGHMEHg2IqXZGKxqyvWdfWU/aim5Dprw5bqpKkTvns=
github.com/go-openapi/jsonreference v0.20.0 h1:MYlu0sBgChmCfJxxUKZ8g1cPWFOB37YSZqewK7OKeyA=
github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo=
github.com/go-openapi/loads v0.21.1 h1:Wb3nVZpdEzDTcly8S4HMkey6fjARRzb7iEaySimlDW0=
github.com/go-openapi/loads v0.21.1/go.mod h1:/DtAMXXneXFjbQMGEtbamCZb+4x7eGwkvZCvBmwUG+g=
github.com/go-openapi/runtime v0.23.1/go.mod h1:AKurw9fNre+h3ELZfk6ILsfvPN+bvvlaU/M9q/r9hpk=
@@ -268,7 +262,6 @@ github.com/go-resty/resty/v2 v2.1.1-0.20191201195748-d7b97669fe48 h1:JVrqSeQfdhY
github.com/go-resty/resty/v2 v2.1.1-0.20191201195748-d7b97669fe48/go.mod h1:dZGr0i9PLlaaTD4H/hoZIDjQ+r6xq8mgbRzHZf7f2J8=
github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw=
github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4=
github.com/go-zookeeper/zk v1.0.3 h1:7M2kwOsc//9VeeFiPtf+uSJlVpU66x9Ba5+8XK7/TDg=
github.com/go-zookeeper/zk v1.0.3/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw=
@@ -372,7 +365,6 @@ github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
@@ -380,11 +372,8 @@ github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hf
github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20221102093814-76f304f74e5e h1:F1LLQqQ8WoIbyoxLUY+JUZe1kuHdxThM6CPUATzE6Io=
github.com/google/pprof v0.0.0-20221102093814-76f304f74e5e/go.mod h1:dDKJzRmX4S37WGHujM7tX//fmj1uioxKzKxz3lo4HJo=
github.com/google/pprof v0.0.0-20221212185716-aee1124e3a93 h1:D5iJJZKAi0rU4e/5E58BkrnN+xeCDjAIqcm1GGxAGSI=
github.com/google/pprof v0.0.0-20221212185716-aee1124e3a93/go.mod h1:dDKJzRmX4S37WGHujM7tX//fmj1uioxKzKxz3lo4HJo=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
@@ -395,11 +384,10 @@ github.com/googleapis/enterprise-certificate-proxy v0.2.0 h1:y8Yozv7SZtlU//QXbez
github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/googleapis/gax-go/v2 v2.6.0 h1:SXk3ABtQYDT/OH8jAyvEOQ58mgawq5C4o/4/89qN2ZU=
github.com/googleapis/gax-go/v2 v2.6.0/go.mod h1:1mjbznJAPHFpesgE5ucqfYEscaz5kMdcIDwU/6+DDoY=
github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g=
github.com/gophercloud/gophercloud v1.0.0 h1:9nTGx0jizmHxDobe4mck89FyQHVyA3CaXLIUSGJjP9k=
github.com/gophercloud/gophercloud v1.0.0/go.mod h1:Q8fZtyi5zZxPS/j9aj3sSxtvj41AdQMDwyo1myduD5c=
github.com/googleapis/gax-go/v2 v2.7.0 h1:IcsPKeInNvYi7eqSaDjiZqDDKu5rsmunY0Y1YupQSSQ=
github.com/googleapis/gax-go/v2 v2.7.0/go.mod h1:TEop28CZZQ2y+c0VxMUmu1lV+fQx57QpBWsYpwqHJx8=
github.com/gophercloud/gophercloud v1.1.1 h1:MuGyqbSxiuVBqkPZ3+Nhbytk1xZxhmfCB2Rg1cJWFWM=
github.com/gophercloud/gophercloud v1.1.1/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
@@ -407,8 +395,8 @@ github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2z
github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/grafana/regexp v0.0.0-20221005093135-b4c2bcb0a4b6 h1:A3dhViTeFDSQcGOXuUi6ukCQSMyDtDISBp2z6OOo2YM=
github.com/grafana/regexp v0.0.0-20221005093135-b4c2bcb0a4b6/go.mod h1:M5qHK+eWfAv8VR/265dIuEpL3fNfeC21tXXp9itM24A=
github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd h1:PpuIBO5P3e9hpqBD0O/HjhShYuM6XE0i/lbE6J94kww=
github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd/go.mod h1:M5qHK+eWfAv8VR/265dIuEpL3fNfeC21tXXp9itM24A=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
@@ -418,11 +406,11 @@ github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4Zs
github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.1 h1:/sDbPb60SusIXjiJGYLUoS/rAQurQmvGWmwn2bBPM9c=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.1/go.mod h1:G+WkljZi4mflcqVxYSgvt8MNctRQHjEH8ubKtt1Ka3w=
github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE=
github.com/hashicorp/consul/api v1.15.3 h1:WYONYL2rxTXtlekAqblR2SCdJsizMDIj/uXb5wNy9zU=
github.com/hashicorp/consul/api v1.15.3/go.mod h1:/g/qgcoBcEXALCNZgRRisyTW0nY86++L0KbeAMXYCeY=
github.com/hashicorp/consul/api v1.18.0 h1:R7PPNzTCeN6VuQNDwwhZWJvzCtGSrNpJqfb22h3yH9g=
github.com/hashicorp/consul/api v1.18.0/go.mod h1:owRRGJ9M5xReDC5nfT8FTJrNAPbT4NM6p/k+d03q2v4=
github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
github.com/hashicorp/consul/sdk v0.11.0 h1:HRzj8YSCln2yGgCumN5CL8lYlD3gBurnervJRJAZyC4=
github.com/hashicorp/consul/sdk v0.11.0/go.mod h1:yPkX5Q6CsxTFMjQQDJwzeNmUUF5NUGGbrDsv9wTb8cw=
github.com/hashicorp/consul/sdk v0.13.0 h1:lce3nFlpv8humJL8rNrrGHYSKc3q+Kxfeg3Ii1m6ZWU=
github.com/hashicorp/consul/sdk v0.13.0/go.mod h1:0hs/l5fOVhJy/VdcoaNqUSi2AUs95eF5WKtv+EYIQqE=
github.com/hashicorp/cronexpr v1.1.1 h1:NJZDd87hGXjoZBdvyCF9mX4DCq5Wy7+A/w+A7q0wn6c=
github.com/hashicorp/cronexpr v1.1.1/go.mod h1:P4wA0KBl9C5q2hABiMO7cp6jcIg96CDh1Efb3g1PWA4=
github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
@@ -433,18 +421,16 @@ github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9n
github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ=
github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
github.com/hashicorp/go-hclog v0.14.1 h1:nQcJDQwIAGnmoUWp8ubocEX40cCml/17YkF6csQLReU=
github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
github.com/hashicorp/go-hclog v0.16.2 h1:K4ev2ib4LdQETX5cSZBG0DVLk1jwGqSPXBjdah3veNs=
github.com/hashicorp/go-hclog v0.16.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
github.com/hashicorp/go-immutable-radix v1.3.0 h1:8exGP7ego3OmkfksihtSouGMZ+hQrhxx+FVELeXpVPE=
github.com/hashicorp/go-immutable-radix v1.3.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc=
github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
github.com/hashicorp/go-msgpack v0.5.3 h1:zKjpN5BK/P5lMYrLmBHdBULWbJ0XpYR+7NGzqkZzoD4=
github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
github.com/hashicorp/go-msgpack v0.5.5 h1:i9R9JSrqIz0QVLz3sz+i3YJdT7TTSLcfLLzJi9aZTuI=
github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
github.com/hashicorp/go-multierror v1.1.0 h1:B9UzwGQJehnUY1yNrnwREHc3fGbC2xefo8g4TbElacI=
github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA=
github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
github.com/hashicorp/go-retryablehttp v0.7.1 h1:sUiuQAnLlbvmExtFQs72iFW/HXeUn8Z1aJLQ4LJJbTQ=
github.com/hashicorp/go-retryablehttp v0.7.1/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY=
@@ -460,6 +446,8 @@ github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/b
github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE=
github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/go-version v1.2.1 h1:zEfKbn2+PDgroKdiOzqiE8rsmLqU2uwi5PB5pBJ3TkI=
github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
@@ -469,21 +457,20 @@ github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO
github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc=
github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
github.com/hashicorp/memberlist v0.3.0/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE=
github.com/hashicorp/memberlist v0.3.1 h1:MXgUXLqva1QvpVEDQW1IQLG0wivQAtmFlHRQ+1vWZfM=
github.com/hashicorp/memberlist v0.3.1/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE=
github.com/hashicorp/nomad/api v0.0.0-20221102143410-8a95f1239005 h1:jKwXhVS4F7qk0g8laz+Anz0g/6yaSJ3HqmSAuSNLUcA=
github.com/hashicorp/nomad/api v0.0.0-20221102143410-8a95f1239005/go.mod h1:vgJmrz4Bz9E1cR/uy70oP9udUJKFRkcEYHlHTp4nFwI=
github.com/hashicorp/memberlist v0.5.0 h1:EtYPN8DpAURiapus508I4n9CzHs2W+8NZGbmmR/prTM=
github.com/hashicorp/memberlist v0.5.0/go.mod h1:yvyXLpo0QaGE59Y7hDTsTzDD25JYBZ4mHgHUZ8lrOI0=
github.com/hashicorp/nomad/api v0.0.0-20221214074818-7dbbf6bc584d h1:kEWrUx7mld3c6HRcO2KhfD1MYBkofuZfEfDwCRQ9aMU=
github.com/hashicorp/nomad/api v0.0.0-20221214074818-7dbbf6bc584d/go.mod h1:8FB4gnSJAfRGxfG+v0pZEPfqhZG7nZ87xDeUyw3gEMI=
github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
github.com/hashicorp/serf v0.9.7 h1:hkdgbqizGQHuU5IPqYM1JdSMV8nKfpuOnZYXssk9muY=
github.com/hashicorp/serf v0.9.7/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4=
github.com/hetznercloud/hcloud-go v1.35.3 h1:WCmFAhLRooih2QHAsbCbEdpIHnshQQmrPqsr3rHE1Ow=
github.com/hetznercloud/hcloud-go v1.35.3/go.mod h1:mepQwR6va27S3UQthaEPGS86jtzSY9xWL1e9dyxXpgA=
github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY=
github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4=
github.com/hetznercloud/hcloud-go v1.39.0 h1:RUlzI458nGnPR6dlcZlrsGXYC1hQlFbKdm8tVtEQQB0=
github.com/hetznercloud/hcloud-go v1.39.0/go.mod h1:mepQwR6va27S3UQthaEPGS86jtzSY9xWL1e9dyxXpgA=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg=
github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU=
github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
@@ -544,7 +531,6 @@ github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0U
github.com/linode/linodego v1.9.3 h1:+lxNZw4avRxhCqGjwfPgQ2PvMT+vOL0OMsTdzixR7hQ=
github.com/linode/linodego v1.9.3/go.mod h1:h6AuFR/JpqwwM/vkj7s8KV3iGN8/jxn+zc437F8SZ8w=
github.com/lyft/protoc-gen-star v0.6.0/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA=
github.com/lyft/protoc-gen-star v0.6.1/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA=
github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ=
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
@@ -570,8 +556,8 @@ github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI=
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso=
github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI=
@@ -629,9 +615,9 @@ github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:v
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.7.0 h1:WSHQ+IS43OoUrWtD1/bbclrwK8TTH5hzp+umCiuxHgs=
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo/v2 v2.1.6 h1:Fx2POJZfKRQcM1pH49qSZiYeu319wji004qX+GDovrU=
github.com/onsi/ginkgo/v2 v2.4.0 h1:+Ig9nvqgS5OBSACXNk15PLdp0U9XPYROt9CFzVdFGIs=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.20.1 h1:PA/3qinGoukvymdIDV8pii6tiZgC8kbmJO6Z5+b002Q=
github.com/onsi/gomega v1.23.0 h1:/oxKu9c2HVap+F3PfKort2Hw5DEU+HGlW8n+tguWsys=
github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
@@ -663,7 +649,6 @@ github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA=
github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI=
github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
@@ -679,8 +664,8 @@ github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3O
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
github.com/prometheus/client_golang v1.13.1 h1:3gMjIY2+/hzmqhtUC/aQNYldJA6DtH3CgQvwS+02K1c=
github.com/prometheus/client_golang v1.13.1/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ=
github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw=
github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
@@ -697,15 +682,15 @@ github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB8
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE=
github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA=
github.com/prometheus/common v0.39.0 h1:oOyhkDq05hPZKItWVBkJ6g6AtGxi+fy7F4JvUV8uhsI=
github.com/prometheus/common v0.39.0/go.mod h1:6XBZ7lYdLCbkAVhwRsWTZn+IN5AB9F/NXd5w0BbEX0Y=
github.com/prometheus/common/assets v0.2.0 h1:0P5OrzoHrYBOSM1OigWL3mY8ZvV2N4zIE/5AahrSrfM=
github.com/prometheus/common/assets v0.2.0/go.mod h1:D17UVUE12bHbim7HzwUvtqm6gwBEaDQ0F+hIGbFbccI=
github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4=
github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI=
github.com/prometheus/exporter-toolkit v0.7.1/go.mod h1:ZUBIj498ePooX9t/2xtDjeQYwvRpiPP2lh5u4iblj2g=
github.com/prometheus/exporter-toolkit v0.8.1 h1:TpKt8z55q1zF30BYaZKqh+bODY0WtByHDOhDA2M9pEs=
github.com/prometheus/exporter-toolkit v0.8.1/go.mod h1:00shzmJL7KxcsabLWcONwpyNEuWhREOnFqZW7vadFS0=
github.com/prometheus/exporter-toolkit v0.8.2 h1:sbJAfBXQFkG6sUkbwBun8MNdzW9+wd5YfPYofbmj0YM=
github.com/prometheus/exporter-toolkit v0.8.2/go.mod h1:00shzmJL7KxcsabLWcONwpyNEuWhREOnFqZW7vadFS0=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
@@ -727,11 +712,11 @@ github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQD
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E=
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.9 h1:0roa6gXKgyta64uqh52AQG3wzZXH21unn+ltzQSXML0=
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.9/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg=
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.10 h1:wsfMs0iv+MJiViM37qh5VEKISi3/ZUq2nNKNdqmumAs=
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.10/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
github.com/shoenig/test v0.4.3 h1:3+CjrpqCwtL08S0wZQilu9WWR/S2CdsLKhHjbJqPj/I=
github.com/shoenig/test v0.4.6 h1:S1pAVs5L1TSRen3N1YQNtBZIh9Z6d1PyQSUDUweMTqk=
github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749 h1:bUGsEnyNbVPw06Bs80sCeARAlK8lhwqGyi6UT8ymuGk=
github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
@@ -753,10 +738,8 @@ github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4=
github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I=
github.com/spf13/afero v1.9.2/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y=
github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
@@ -820,27 +803,26 @@ go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M=
go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.36.4 h1:aUEBEdCa6iamGzg6fuYxDA8ThxvOG240mAvWDU+XLio=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.36.4/go.mod h1:l2MdsbKTocpPS5nQZscqTR9jd8u96VYZdcpF8Sye7mA=
go.opentelemetry.io/otel v1.11.1 h1:4WLLAmcfkmDk2ukNXJyq3/kiz/3UzCaYq6PskJsaou4=
go.opentelemetry.io/otel v1.11.1/go.mod h1:1nNhXBbWSD0nsL38H6btgnFN2k4i0sNLHNNMZMSbUGE=
go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.11.1 h1:X2GndnMCsUPh6CiY2a+frAbNsXaPLbB0soHRYhAZ5Ig=
go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.11.1/go.mod h1:i8vjiSzbiUC7wOQplijSXMYUpNM93DtlS5CbUT+C6oQ=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.11.1 h1:MEQNafcNCB0uQIti/oHgU7CZpUMYQ7qigBwMVKycHvc=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.11.1/go.mod h1:19O5I2U5iys38SsmT2uDJja/300woyzE1KPIQxEUBUc=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.11.1 h1:LYyG/f1W/jzAix16jbksJfMQFpOH/Ma6T639pVPMgfI=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.11.1/go.mod h1:QrRRQiY3kzAoYPNLP0W/Ikg0gR6V3LMc+ODSxr7yyvg=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.11.1 h1:tFl63cpAAcD9TOU6U8kZU7KyXuSRYAZlbx1C61aaB74=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.11.1/go.mod h1:X620Jww3RajCJXw/unA+8IRTgxkdS7pi+ZwK9b7KUJk=
go.opentelemetry.io/otel/metric v0.33.0 h1:xQAyl7uGEYvrLAiV/09iTJlp1pZnQ9Wl793qbVvED1E=
go.opentelemetry.io/otel/metric v0.33.0/go.mod h1:QlTYc+EnYNq/M2mNk1qDDMRLpqCOj2f/r5c7Fd5FYaI=
go.opentelemetry.io/otel/sdk v1.11.1 h1:F7KmQgoHljhUuJyA+9BiU+EkJfyX5nVVF4wyzWZpKxs=
go.opentelemetry.io/otel/sdk v1.11.1/go.mod h1:/l3FE4SupHJ12TduVjUkZtlfFqDCQJlOlithYrdktys=
go.opentelemetry.io/otel/trace v1.11.1 h1:ofxdnzsNrGBYXbP7t7zpUK281+go5rF7dvdIZXF8gdQ=
go.opentelemetry.io/otel/trace v1.11.1/go.mod h1:f/Q9G7vzk5u91PhbmKbg1Qn0rzH1LJ4vbPHFGkTPtOk=
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.37.0 h1:yt2NKzK7Vyo6h0+X8BA4FpreZQTlVEIarnsBP/H5mzs=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.37.0/go.mod h1:+ARmXlUlc51J7sZeCBkBJNdHGySrdOzgzxp6VWRWM1U=
go.opentelemetry.io/otel v1.11.2 h1:YBZcQlsVekzFsFbjygXMOXSs6pialIZxcjfO/mBDmR0=
go.opentelemetry.io/otel v1.11.2/go.mod h1:7p4EUV+AqgdlNV9gL97IgUZiVR3yrFXYo53f9BM3tRI=
go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.11.2 h1:htgM8vZIF8oPSCxa341e3IZ4yr/sKxgu8KZYllByiVY=
go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.11.2/go.mod h1:rqbht/LlhVBgn5+k3M5QK96K5Xb0DvXpMJ5SFQpY6uw=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.11.2 h1:fqR1kli93643au1RKo0Uma3d2aPQKT+WBKfTSBaKbOc=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.11.2/go.mod h1:5Qn6qvgkMsLDX+sYK64rHb1FPhpn0UtxF+ouX1uhyJE=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.11.2 h1:ERwKPn9Aer7Gxsc0+ZlutlH1bEEAUXAUhqm3Y45ABbk=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.11.2/go.mod h1:jWZUM2MWhWCJ9J9xVbRx7tzK1mXKpAlze4CeulycwVY=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.11.2 h1:Us8tbCmuN16zAnK5TC69AtODLycKbwnskQzaB6DfFhc=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.11.2/go.mod h1:GZWSQQky8AgdJj50r1KJm8oiQiIPaAX7uZCFQX9GzC8=
go.opentelemetry.io/otel/metric v0.34.0 h1:MCPoQxcg/26EuuJwpYN1mZTeCYAUGx8ABxfW07YkjP8=
go.opentelemetry.io/otel/metric v0.34.0/go.mod h1:ZFuI4yQGNCupurTXCwkeD/zHBt+C2bR7bw5JqUm/AP8=
go.opentelemetry.io/otel/sdk v1.11.2 h1:GF4JoaEx7iihdMFu30sOyRx52HDHOkl9xQ8SMqNXUiU=
go.opentelemetry.io/otel/sdk v1.11.2/go.mod h1:wZ1WxImwpq+lVRo4vsmSOxdd+xwoUJ6rqyLc3SyX9aU=
go.opentelemetry.io/otel/trace v1.11.2 h1:Xf7hWSF2Glv0DE3MH7fBHvtpSBsjcBUe5MYAmZM/+y0=
go.opentelemetry.io/otel/trace v1.11.2/go.mod h1:4N+yC7QEz7TTsG9BSRLNAa63eg5E06ObSbKPmxQ/pKA=
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U=
go.opentelemetry.io/proto/otlp v0.19.0 h1:IVN6GR+mhC4s5yfcTbmzHYODqvWAp3ZedA2SJPI1Nnw=
@@ -872,13 +854,11 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U
golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201216223049-8b5274cf687f/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20211202192323-5770296d904e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.1.0 h1:MDRAIl0xIo9Io2xV565hzXHw3zVseKrJKodhohM5CjU=
golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@@ -891,8 +871,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
golang.org/x/exp v0.0.0-20221031165847-c99f073a8326 h1:QfTh0HpN6hlw6D3vu8DAwC8pBIwikq0AI1evdm+FksE=
golang.org/x/exp v0.0.0-20221031165847-c99f073a8326/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=
golang.org/x/exp v0.0.0-20221212164502-fae10dda9338 h1:OvjRkcNHnf6/W5FZXSxODbxwD+X7fspczG7Jn/xQVD4=
golang.org/x/exp v0.0.0-20221212164502-fae10dda9338/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@@ -905,7 +885,6 @@ golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHl
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 h1:VLliZ0d+/avPrXXH+OakdXhpJuEoBZuwh1m2j7U6Iug=
golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
@@ -916,14 +895,12 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB
golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.6.0 h1:b9gGHsz9/HhJ3HF5DHQytPpuwocVTChQJK3AvoLRD5I=
golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI=
golang.org/x/mod v0.7.0 h1:LapD9S96VoQRhi/GrNTqeBJFrUjs5UHCAtTlgwA5oZA=
golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -959,10 +936,7 @@ golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8=
@@ -976,22 +950,18 @@ golang.org/x/net v0.0.0-20211216030914-fe4d6282115f/go.mod h1:9nx3DQGgdP8bBQD5qx
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.1.0 h1:hZ/3BUoy5aId7sCpA/Tc5lt8DkFgdVS2onTpJsZ/fl0=
golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
golang.org/x/net v0.4.0 h1:Q5QPcMlvfxFTAPV0+07Xz/MpK9NTXu2VDUuy0FeMfaU=
golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
golang.org/x/oauth2 v0.1.0 h1:isLCZuhj4v+tYv7eskaN4v/TM+A1begWWgyVJDdl1+Y=
golang.org/x/oauth2 v0.1.0/go.mod h1:G9FE4dLTsbXUu90h/Pf85g4w1D+SSAgR+q46nJZ8M4A=
golang.org/x/oauth2 v0.3.0 h1:6l90koy8/LaBLmLu8jpHeHexzMwEita0zFfYlggy2F8=
golang.org/x/oauth2 v0.3.0/go.mod h1:rQrIauxkUhJ6CuwEXwymO2/eh4xz2ZWF1nBkcxS+tGk=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -1053,20 +1023,14 @@ golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210420072515-93ed5bcd2bfe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@ -1080,31 +1044,34 @@ golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0 h1:kunALQeHf1/185U1i0GOB/fy1IPRDDpuoOOqRReG57U=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.3.0 h1:w8ZOecv6NaNa/zC8944JTU3vz4u6Lagfk4RPQxv92NQ=
golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.1.0 h1:g6Z6vPFA9dYBAF7DWcH6sCcOntplXsDKcliusYijMlw=
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.3.0 h1:qoo4akIqOcDME5bhc/NgxUdovd6BSS2uMsVjB56q1xI=
golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.4.0 h1:BrVqGRd7+k1DiOgtnFvAkoQEWQvBc25ouMJM6429SFg=
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.5.0 h1:OLmvp0KP+FVG99Ct/qFiL/Fhk4zp4QQnZ7b2U+5piUM=
golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.1.0 h1:xYY+Bajn2a7VBmTM5GikTmnK8ZuX8YgnQCqZpbBNtmA=
golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@ -1156,26 +1123,18 @@ golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roY
golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE=
golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.6-0.20210726203631-07bc1bf47fb2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.2.0 h1:G6AHpWxTMGY1KyEYoAQ5WTtIekUUvDNjan3ugu60JvE=
golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA=
golang.org/x/tools v0.4.0 h1:7mTAgkunk3fr4GAloyyCasadO6h9zSsQZbwvcaIciV4=
golang.org/x/tools v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk=
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
@ -1191,11 +1150,8 @@ google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/
google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg=
google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE=
google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=
google.golang.org/api v0.102.0 h1:JxJl2qQ85fRMPNvlZY/enexbxpCjLwGhZUtgfGeQ51I=
google.golang.org/api v0.102.0/go.mod h1:3VFl6/fzoA+qNuS1N1/VfXY4LjoXN/wzeIp7TweWwGo=
google.golang.org/api v0.104.0 h1:KBfmLRqdZEbwQleFlSLnzpQJwhjpmNOk4cKQIBDZ9mg=
google.golang.org/api v0.104.0/go.mod h1:JCspTXJbBxa5ySXw4UgUqVer7DfVxbvc/CTUFqAED5U=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@ -1234,18 +1190,11 @@ google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1m
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20220329172620-7be39ac1afc7/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c h1:QgY/XxIAIeccR+Ca/rDdKubLIU9rcJ3xfy1DC/Wd2Oo=
google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c/go.mod h1:CGI5F/G+E5bKwmfYo09AXuVN4dD894kIKUFmVbP2/Fo=
google.golang.org/genproto v0.0.0-20221207170731-23e4bf6bdc37 h1:jmIfw8+gSvXcZSgaFAGyInDXeWzUhvYH57G/5GKMn70=
google.golang.org/genproto v0.0.0-20221207170731-23e4bf6bdc37/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM=
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM=
@ -1262,17 +1211,14 @@ google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8
google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8=
google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ=
google.golang.org/grpc v1.50.1 h1:DS/BukOZWp8s6p4Dt/tOaJaTQyPyOoCcrjroHuCeLzY=
google.golang.org/grpc v1.50.1/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI=
google.golang.org/grpc v1.51.0 h1:E1eGv1FTqoLIdnBCZufiSHgKjlqG6fKFf6pPWtMTh8U=
google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@ -1296,7 +1242,6 @@ gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
@ -1337,21 +1282,21 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
k8s.io/api v0.25.3 h1:Q1v5UFfYe87vi5H7NU0p4RXC26PPMT8KOpr1TLQbCMQ=
k8s.io/api v0.25.3/go.mod h1:o42gKscFrEVjHdQnyRenACrMtbuJsVdP+WVjqejfzmI=
k8s.io/apimachinery v0.25.3 h1:7o9ium4uyUOM76t6aunP0nZuex7gDf8VGwkR5RcJnQc=
k8s.io/apimachinery v0.25.3/go.mod h1:jaF9C/iPNM1FuLl7Zuy5b9v+n35HGSh6AQ4HYRkCqwo=
k8s.io/client-go v0.25.3 h1:oB4Dyl8d6UbfDHD8Bv8evKylzs3BXzzufLiO27xuPs0=
k8s.io/client-go v0.25.3/go.mod h1:t39LPczAIMwycjcXkVc+CB+PZV69jQuNx4um5ORDjQA=
k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1 h1:MQ8BAZPZlWk3S9K4a9NCkIFQtZShWqoha7snGixVgEA=
k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1/go.mod h1:C/N6wCaBHeBHkHUesQOQy2/MZqGgMAFPqGsGQLdbZBU=
k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed h1:jAne/RjBTyawwAy0utX5eqigAwz/lQhTmy+Hr/Cpue4=
k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
k8s.io/api v0.26.0 h1:IpPlZnxBpV1xl7TGk/X6lFtpgjgntCg8PJ+qrPHAC7I=
k8s.io/api v0.26.0/go.mod h1:k6HDTaIFC8yn1i6pSClSqIwLABIcLV9l5Q4EcngKnQg=
k8s.io/apimachinery v0.26.0 h1:1feANjElT7MvPqp0JT6F3Ss6TWDwmcjLypwoPpEf7zg=
k8s.io/apimachinery v0.26.0/go.mod h1:tnPmbONNJ7ByJNz9+n9kMjNP8ON+1qoAIIC70lztu74=
k8s.io/client-go v0.26.0 h1:lT1D3OfO+wIi9UFolCrifbjUUgu7CpLca0AD8ghRLI8=
k8s.io/client-go v0.26.0/go.mod h1:I2Sh57A79EQsDmn7F7ASpmru1cceh3ocVT9KlX2jEZg=
k8s.io/kube-openapi v0.0.0-20221207184640-f3cff1453715 h1:tBEbstoM+K0FiBV5KGAKQ0kuvf54v/hwpldiJt69w1s=
k8s.io/kube-openapi v0.0.0-20221207184640-f3cff1453715/go.mod h1:+Axhij7bCpeqhklhUTe3xmOn6bWxolyZEeyaFpjGtl4=
k8s.io/utils v0.0.0-20221128185143-99ec85e7a448 h1:KTgPnR10d5zhztWptI952TNtt/4u5h3IzDXkdIMuo2Y=
k8s.io/utils v0.0.0-20221128185143-99ec85e7a448/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 h1:iXTIw73aPyC+oRdyqqvVJuloN1p0AC/kzH07hu3NE+k=
sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE=
sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E=
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=

View file

@ -244,6 +244,37 @@ func (h *FloatHistogram) Sub(other *FloatHistogram) *FloatHistogram {
return h
}
// Equals returns true if the given float histogram matches exactly.
// Exact match means there are no new buckets (even empty ones) and no missing buckets,
// and all bucket values match. The two histograms may lay out empty spans differently,
// but the spans must represent the same bucket layout to match.
func (h *FloatHistogram) Equals(h2 *FloatHistogram) bool {
if h2 == nil {
return false
}
if h.Schema != h2.Schema || h.ZeroThreshold != h2.ZeroThreshold ||
h.ZeroCount != h2.ZeroCount || h.Count != h2.Count || h.Sum != h2.Sum {
return false
}
if !spansMatch(h.PositiveSpans, h2.PositiveSpans) {
return false
}
if !spansMatch(h.NegativeSpans, h2.NegativeSpans) {
return false
}
if !bucketsMatch(h.PositiveBuckets, h2.PositiveBuckets) {
return false
}
if !bucketsMatch(h.NegativeBuckets, h2.NegativeBuckets) {
return false
}
return true
}
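For illustration, a minimal sketch of the new Equals method in use; the field values are made up, but the fields themselves are the ones shown in this hunk:

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
)

func main() {
	h1 := &histogram.FloatHistogram{
		Schema: 0, Count: 3, Sum: 5,
		PositiveSpans:   []histogram.Span{{Offset: 0, Length: 2}},
		PositiveBuckets: []float64{1, 2},
	}
	h2 := h1.Copy()
	fmt.Println(h1.Equals(h2)) // true: same layout and same values.

	h2.Sum++
	fmt.Println(h1.Equals(h2)) // false: Sum differs.
}
```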
// addBucket takes the "coordinates" of the last bucket that was handled and
// adds the provided bucket after it. If a corresponding bucket exists, the
// count is added. If not, the bucket is inserted. The updated slices and the

View file

@ -25,14 +25,14 @@ type BucketCount interface {
float64 | uint64
}
// internalBucketCount is used internally by Histogram and FloatHistogram. The
// InternalBucketCount is used internally by Histogram and FloatHistogram. The
// difference to the BucketCount above is that Histogram internally uses deltas
// between buckets rather than absolute counts (while FloatHistogram uses
// absolute counts directly). Go type parameters don't allow type
// specialization. Therefore, where special treatment of deltas between buckets
// vs. absolute counts is important, this information has to be provided as a
// separate boolean parameter "deltaBuckets".
type internalBucketCount interface {
type InternalBucketCount interface {
float64 | int64
}
@ -86,7 +86,7 @@ type BucketIterator[BC BucketCount] interface {
// implementations, together with an implementation of the At method. This
// iterator can be embedded in full implementations of BucketIterator to save on
// code replication.
type baseBucketIterator[BC BucketCount, IBC internalBucketCount] struct {
type baseBucketIterator[BC BucketCount, IBC InternalBucketCount] struct {
schema int32
spans []Span
buckets []IBC
@ -121,7 +121,7 @@ func (b baseBucketIterator[BC, IBC]) At() Bucket[BC] {
// compactBuckets is a generic function used by both Histogram.Compact and
// FloatHistogram.Compact. Set deltaBuckets to true if the provided buckets are
// deltas. Set it to false if the buckets contain absolute counts.
func compactBuckets[IBC internalBucketCount](buckets []IBC, spans []Span, maxEmptyBuckets int, deltaBuckets bool) ([]IBC, []Span) {
func compactBuckets[IBC InternalBucketCount](buckets []IBC, spans []Span, maxEmptyBuckets int, deltaBuckets bool) ([]IBC, []Span) {
// Fast path: If there are no empty buckets AND no offset in any span is
// <= maxEmptyBuckets AND no span has length 0, there is nothing to do and we can return
// immediately. We check that first because it's cheap and presumably
@ -327,6 +327,18 @@ func compactBuckets[IBC internalBucketCount](buckets []IBC, spans []Span, maxEmp
return buckets, spans
}
func bucketsMatch[IBC InternalBucketCount](b1, b2 []IBC) bool {
if len(b1) != len(b2) {
return false
}
for i, b := range b1 {
if b != b2[i] {
return false
}
}
return true
}
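To make the delta-vs-absolute distinction above concrete: Histogram stores each bucket as an int64 difference to the previous bucket's count, while FloatHistogram stores float64 absolute counts. A hypothetical decoding helper (not part of this change) shows the relationship:

```go
// absoluteFromDeltas decodes delta-encoded bucket counts (the
// Histogram representation) into absolute counts (the FloatHistogram
// representation).
func absoluteFromDeltas(deltas []int64) []float64 {
	abs := make([]float64, len(deltas))
	var count int64
	for i, d := range deltas {
		count += d // Each entry is the difference to the previous count.
		abs[i] = float64(count)
	}
	return abs
}
```

For example, the deltas {1, 2, -2} decode to the absolute counts {1, 3, 1}.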
func getBound(idx, schema int32) float64 {
// Here a bit of context about the behavior for the last bucket counting
// regular numbers (called simply "last bucket" below) and the bucket

View file

@ -250,18 +250,6 @@ func allEmptySpans(s []Span) bool {
return true
}
func bucketsMatch(b1, b2 []int64) bool {
if len(b1) != len(b2) {
return false
}
for i, b := range b1 {
if b != b2[i] {
return false
}
}
return true
}
// Compact works like FloatHistogram.Compact. See there for detailed
// explanations.
func (h *Histogram) Compact(maxEmptyBuckets int) *Histogram {

View file

@ -411,6 +411,7 @@ func TestHistogramToFloat(t *testing.T) {
require.Equal(t, h.String(), fh.String())
}
// TestHistogramMatches tests both Histogram and FloatHistogram.
func TestHistogramMatches(t *testing.T) {
h1 := Histogram{
Schema: 3,
@ -430,14 +431,28 @@ func TestHistogramMatches(t *testing.T) {
NegativeBuckets: []int64{1, 2, -2, 1, -1, 0, 0},
}
equals := func(h1, h2 Histogram) {
require.True(t, h1.Equals(&h2))
require.True(t, h2.Equals(&h1))
h1f, h2f := h1.ToFloat(), h2.ToFloat()
require.True(t, h1f.Equals(h2f))
require.True(t, h2f.Equals(h1f))
}
notEquals := func(h1, h2 Histogram) {
require.False(t, h1.Equals(&h2))
require.False(t, h2.Equals(&h1))
h1f, h2f := h1.ToFloat(), h2.ToFloat()
require.False(t, h1f.Equals(h2f))
require.False(t, h2f.Equals(h1f))
}
h2 := h1.Copy()
require.True(t, h1.Equals(h2))
equals(h1, *h2)
// Changed spans but same layout.
h2.PositiveSpans = append(h2.PositiveSpans, Span{Offset: 5})
h2.NegativeSpans = append(h2.NegativeSpans, Span{Offset: 2})
require.True(t, h1.Equals(h2))
require.True(t, h2.Equals(&h1))
equals(h1, *h2)
// Adding empty spans in between.
h2.PositiveSpans[1].Offset = 6
h2.PositiveSpans = []Span{
@ -455,58 +470,57 @@ func TestHistogramMatches(t *testing.T) {
h2.NegativeSpans[1],
h2.NegativeSpans[2],
}
require.True(t, h1.Equals(h2))
require.True(t, h2.Equals(&h1))
equals(h1, *h2)
// All mismatches.
require.False(t, h1.Equals(nil))
notEquals(h1, Histogram{})
h2.Schema = 1
require.False(t, h1.Equals(h2))
notEquals(h1, *h2)
h2 = h1.Copy()
h2.Count++
require.False(t, h1.Equals(h2))
notEquals(h1, *h2)
h2 = h1.Copy()
h2.Sum++
require.False(t, h1.Equals(h2))
notEquals(h1, *h2)
h2 = h1.Copy()
h2.ZeroThreshold++
require.False(t, h1.Equals(h2))
notEquals(h1, *h2)
h2 = h1.Copy()
h2.ZeroCount++
require.False(t, h1.Equals(h2))
notEquals(h1, *h2)
// Changing value of buckets.
h2 = h1.Copy()
h2.PositiveBuckets[len(h2.PositiveBuckets)-1]++
require.False(t, h1.Equals(h2))
notEquals(h1, *h2)
h2 = h1.Copy()
h2.NegativeBuckets[len(h2.NegativeBuckets)-1]++
require.False(t, h1.Equals(h2))
notEquals(h1, *h2)
// Changing bucket layout.
h2 = h1.Copy()
h2.PositiveSpans[1].Offset++
require.False(t, h1.Equals(h2))
notEquals(h1, *h2)
h2 = h1.Copy()
h2.NegativeSpans[1].Offset++
require.False(t, h1.Equals(h2))
notEquals(h1, *h2)
// Adding an empty bucket.
h2 = h1.Copy()
h2.PositiveSpans[0].Offset--
h2.PositiveSpans[0].Length++
h2.PositiveBuckets = append([]int64{0}, h2.PositiveBuckets...)
require.False(t, h1.Equals(h2))
notEquals(h1, *h2)
h2 = h1.Copy()
h2.NegativeSpans[0].Offset--
h2.NegativeSpans[0].Length++
h2.NegativeBuckets = append([]int64{0}, h2.NegativeBuckets...)
require.False(t, h1.Equals(h2))
notEquals(h1, *h2)
// Adding new bucket.
h2 = h1.Copy()
@ -515,14 +529,14 @@ func TestHistogramMatches(t *testing.T) {
Length: 1,
})
h2.PositiveBuckets = append(h2.PositiveBuckets, 1)
require.False(t, h1.Equals(h2))
notEquals(h1, *h2)
h2 = h1.Copy()
h2.NegativeSpans = append(h2.NegativeSpans, Span{
Offset: 1,
Length: 1,
})
h2.NegativeBuckets = append(h2.NegativeBuckets, 1)
require.False(t, h1.Equals(h2))
notEquals(h1, *h2)
}
func TestHistogramCompact(t *testing.T) {

View file

@ -20,6 +20,7 @@ import (
"strconv"
"github.com/cespare/xxhash/v2"
"github.com/prometheus/common/model"
)
// Well-known label names used by Prometheus components.
@ -134,6 +135,7 @@ func (ls Labels) MatchLabels(on bool, names ...string) Labels {
}
// Hash returns a hash value for the label set.
// Note: the result is not guaranteed to be consistent across different runs of Prometheus.
func (ls Labels) Hash() uint64 {
// Use xxhash.Sum64(b) for fast path as it's faster.
b := make([]byte, 0, 1024)
@ -311,6 +313,19 @@ func (ls Labels) WithoutEmpty() Labels {
return ls
}
// IsValid checks if the metric name or label names are valid.
func (ls Labels) IsValid() bool {
for _, l := range ls {
if l.Name == model.MetricNameLabel && !model.IsValidMetricName(model.LabelValue(l.Value)) {
return false
}
if !model.LabelName(l.Name).IsValid() || !model.LabelValue(l.Value).IsValid() {
return false
}
}
return true
}
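A quick sketch of the new check in use, with inputs mirroring the test cases further down:

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/labels"
)

func main() {
	ok := labels.FromStrings("__name__", "http_requests_total", "job", "api")
	fmt.Println(ok.IsValid()) // true

	bad := labels.FromStrings("__name__", "http-requests")
	fmt.Println(bad.IsValid()) // false: '-' is not allowed in a metric name.
}
```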
// Equal returns whether the two label sets are equal.
func Equal(ls, o Labels) bool {
if len(ls) != len(o) {
@ -342,9 +357,7 @@ func EmptyLabels() Labels {
// The caller has to guarantee that all label names are unique.
func New(ls ...Label) Labels {
set := make(Labels, 0, len(ls))
for _, l := range ls {
set = append(set, l)
}
set = append(set, ls...)
sort.Sort(set)
return set
@ -399,6 +412,49 @@ func Compare(a, b Labels) int {
return len(a) - len(b)
}
// CopyFrom copies labels from b on top of whatever was in ls previously, reusing memory or expanding if needed.
func (ls *Labels) CopyFrom(b Labels) {
(*ls) = append((*ls)[:0], b...)
}
// IsEmpty returns true if ls represents an empty set of labels.
func (ls Labels) IsEmpty() bool {
return len(ls) == 0
}
// Range calls f on each label.
func (ls Labels) Range(f func(l Label)) {
for _, l := range ls {
f(l)
}
}
// Validate calls f on each label. If f returns a non-nil error, Validate stops and returns that error.
func (ls Labels) Validate(f func(l Label) error) error {
for _, l := range ls {
if err := f(l); err != nil {
return err
}
}
return nil
}
// InternStrings calls intern on every string value inside ls, replacing them with what it returns.
func (ls *Labels) InternStrings(intern func(string) string) {
for i, l := range *ls {
(*ls)[i].Name = intern(l.Name)
(*ls)[i].Value = intern(l.Value)
}
}
// ReleaseStrings calls release on every string value inside ls.
func (ls Labels) ReleaseStrings(release func(string)) {
for _, l := range ls {
release(l.Name)
release(l.Value)
}
}
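A minimal sketch of the new accessors in use; the reserved-name rule in the callback is illustrative only:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/prometheus/prometheus/model/labels"
)

func main() {
	ls := labels.FromStrings("instance", "localhost:9090", "job", "api")

	// Range visits every label without exposing the underlying slice.
	ls.Range(func(l labels.Label) {
		fmt.Printf("%s=%s\n", l.Name, l.Value)
	})

	// Validate stops at the first error returned by the callback.
	err := ls.Validate(func(l labels.Label) error {
		if strings.HasPrefix(l.Name, "__") {
			return fmt.Errorf("reserved label name %q", l.Name)
		}
		return nil
	})
	fmt.Println(err) // <nil>: no reserved names in this set.
}
```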
// Builder allows modifying Labels.
type Builder struct {
base Labels
@ -455,7 +511,7 @@ Outer:
return b
}
// Set the name/value pair as a label.
// Set the name/value pair as a label. A value of "" means delete that label.
func (b *Builder) Set(n, v string) *Builder {
if v == "" {
// Empty labels are the same as missing labels.
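To illustrate the documented delete-on-empty behavior, a small sketch (assuming the Builder.Labels signature used elsewhere in this change):

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/labels"
)

func main() {
	b := labels.NewBuilder(labels.FromStrings("env", "dev", "job", "api"))
	b.Set("env", "") // An empty value deletes the label, same as b.Del("env").
	fmt.Println(b.Labels(labels.EmptyLabels())) // {job="api"}
}
```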
@ -510,3 +566,40 @@ Outer:
}
return res
}
// ScratchBuilder allows efficient construction of a Labels from scratch.
type ScratchBuilder struct {
add Labels
}
// NewScratchBuilder creates a ScratchBuilder initialized for Labels with n entries.
func NewScratchBuilder(n int) ScratchBuilder {
return ScratchBuilder{add: make([]Label, 0, n)}
}
func (b *ScratchBuilder) Reset() {
b.add = b.add[:0]
}
// Add a name/value pair.
// Note: if you Add the same name twice you will get a duplicate label, which is invalid.
func (b *ScratchBuilder) Add(name, value string) {
b.add = append(b.add, Label{Name: name, Value: value})
}
// Sort the labels added so far by name.
func (b *ScratchBuilder) Sort() {
sort.Sort(b.add)
}
// Assign is for when you already have a Labels which you want this ScratchBuilder to return.
func (b *ScratchBuilder) Assign(ls Labels) {
b.add = append(b.add[:0], ls...) // Copy on top of our slice, so we don't retain the input slice.
}
// Return the name/value pairs added so far as a Labels object.
// Note: if you want them sorted, call Sort() first.
func (b *ScratchBuilder) Labels() Labels {
// Copy the slice, so the next use of ScratchBuilder doesn't overwrite.
return append([]Label{}, b.add...)
}
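A short usage sketch of ScratchBuilder, mirroring the test added below:

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/labels"
)

func main() {
	b := labels.NewScratchBuilder(2)
	b.Add("bbb", "222") // Add in any order...
	b.Add("aaa", "111")
	b.Sort() // ...then restore the sorted-labels invariant.
	fmt.Println(b.Labels()) // {aaa="111", bbb="222"}

	b.Reset() // Reuse the builder without reallocating.
	b.Add("ccc", "333")
	b.Sort()
	fmt.Println(b.Labels()) // {ccc="333"}
}
```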

View file

@ -36,10 +36,6 @@ func TestLabels_String(t *testing.T) {
lables: Labels{},
expected: "{}",
},
{
lables: nil,
expected: "{}",
},
}
for _, c := range cases {
str := c.lables.String()
@ -216,6 +212,62 @@ func TestLabels_WithoutEmpty(t *testing.T) {
}
}
func TestLabels_IsValid(t *testing.T) {
for _, test := range []struct {
input Labels
expected bool
}{
{
input: FromStrings(
"__name__", "test",
"hostname", "localhost",
"job", "check",
),
expected: true,
},
{
input: FromStrings(
"__name__", "test:ms",
"hostname_123", "localhost",
"_job", "check",
),
expected: true,
},
{
input: FromStrings("__name__", "test-ms"),
expected: false,
},
{
input: FromStrings("__name__", "0zz"),
expected: false,
},
{
input: FromStrings("abc:xyz", "invalid"),
expected: false,
},
{
input: FromStrings("123abc", "invalid"),
expected: false,
},
{
input: FromStrings("中文abc", "invalid"),
expected: false,
},
{
input: FromStrings("invalid", "aa\xe2"),
expected: false,
},
{
input: FromStrings("invalid", "\xF7\xBF\xBF\xBF"),
expected: false,
},
} {
t.Run("", func(t *testing.T) {
require.Equal(t, test.expected, test.input.IsValid())
})
}
}
func TestLabels_Equal(t *testing.T) {
labels := FromStrings(
"aaa", "111",
@ -260,18 +312,18 @@ func TestLabels_Equal(t *testing.T) {
func TestLabels_FromStrings(t *testing.T) {
labels := FromStrings("aaa", "111", "bbb", "222")
expected := Labels{
{
Name: "aaa",
Value: "111",
},
{
Name: "bbb",
Value: "222",
},
}
require.Equal(t, expected, labels, "unexpected labelset")
x := 0
labels.Range(func(l Label) {
switch x {
case 0:
require.Equal(t, Label{Name: "aaa", Value: "111"}, l, "unexpected value")
case 1:
require.Equal(t, Label{Name: "bbb", Value: "222"}, l, "unexpected value")
default:
t.Fatalf("unexpected labelset value %d: %v", x, l)
}
x++
})
require.Panics(t, func() { FromStrings("aaa", "111", "bbb") }) //nolint:staticcheck // Ignore SA5012, error is intentional test.
}
@ -483,7 +535,6 @@ func TestBuilder(t *testing.T) {
want: FromStrings("aaa", "111", "ccc", "333"),
},
{
base: nil,
set: []Label{{"aaa", "111"}, {"bbb", "222"}, {"ccc", "333"}},
del: []string{"bbb"},
want: FromStrings("aaa", "111", "ccc", "333"),
@ -545,11 +596,49 @@ func TestBuilder(t *testing.T) {
}
}
func TestScratchBuilder(t *testing.T) {
for i, tcase := range []struct {
add []Label
want Labels
}{
{
add: []Label{},
want: EmptyLabels(),
},
{
add: []Label{{"aaa", "111"}},
want: FromStrings("aaa", "111"),
},
{
add: []Label{{"aaa", "111"}, {"bbb", "222"}, {"ccc", "333"}},
want: FromStrings("aaa", "111", "bbb", "222", "ccc", "333"),
},
{
add: []Label{{"bbb", "222"}, {"aaa", "111"}, {"ccc", "333"}},
want: FromStrings("aaa", "111", "bbb", "222", "ccc", "333"),
},
{
add: []Label{{"ddd", "444"}},
want: FromStrings("ddd", "444"),
},
} {
t.Run(fmt.Sprint(i), func(t *testing.T) {
b := ScratchBuilder{}
for _, lbl := range tcase.add {
b.Add(lbl.Name, lbl.Value)
}
b.Sort()
require.Equal(t, tcase.want, b.Labels())
b.Assign(tcase.want)
require.Equal(t, tcase.want, b.Labels())
})
}
}
func TestLabels_Hash(t *testing.T) {
lbls := FromStrings("foo", "bar", "baz", "qux")
require.Equal(t, lbls.Hash(), lbls.Hash())
require.NotEqual(t, lbls.Hash(), Labels{lbls[1], lbls[0]}.Hash(), "unordered labels match.")
require.NotEqual(t, lbls.Hash(), Labels{lbls[0]}.Hash(), "different labels match.")
require.NotEqual(t, lbls.Hash(), FromStrings("foo", "bar").Hash(), "different labels match.")
}
var benchmarkLabelsResult uint64
@ -567,7 +656,7 @@ func BenchmarkLabels_Hash(b *testing.B) {
// Label ~20B name, 50B value.
b.Set(fmt.Sprintf("abcdefghijabcdefghijabcdefghij%d", i), fmt.Sprintf("abcdefghijabcdefghijabcdefghijabcdefghijabcdefghij%d", i))
}
return b.Labels(nil)
return b.Labels(EmptyLabels())
}(),
},
{
@ -578,7 +667,7 @@ func BenchmarkLabels_Hash(b *testing.B) {
// Label ~50B name, 50B value.
b.Set(fmt.Sprintf("abcdefghijabcdefghijabcdefghijabcdefghijabcdefghij%d", i), fmt.Sprintf("abcdefghijabcdefghijabcdefghijabcdefghijabcdefghij%d", i))
}
return b.Labels(nil)
return b.Labels(EmptyLabels())
}(),
},
{

View file

@ -17,7 +17,6 @@ import (
"bufio"
"fmt"
"os"
"sort"
"strings"
)
@ -51,13 +50,14 @@ func ReadLabels(fn string, n int) ([]Labels, error) {
defer f.Close()
scanner := bufio.NewScanner(f)
b := ScratchBuilder{}
var mets []Labels
hashes := map[uint64]struct{}{}
i := 0
for scanner.Scan() && i < n {
m := make(Labels, 0, 10)
b.Reset()
r := strings.NewReplacer("\"", "", "{", "", "}", "")
s := r.Replace(scanner.Text())
@ -65,10 +65,11 @@ func ReadLabels(fn string, n int) ([]Labels, error) {
labelChunks := strings.Split(s, ",")
for _, labelChunk := range labelChunks {
split := strings.Split(labelChunk, ":")
m = append(m, Label{Name: split[0], Value: split[1]})
b.Add(split[0], split[1])
}
// Order of the k/v labels matters; don't assume we'll always receive them already sorted.
sort.Sort(m)
b.Sort()
m := b.Labels()
h := m.Hash()
if _, ok := hashes[h]; ok {

View file

@ -203,20 +203,20 @@ func (re Regexp) String() string {
// Process returns a relabeled copy of the given label set. The relabel configurations
// are applied in order of input.
// If a label set is dropped, nil is returned.
// If a label set is dropped, EmptyLabels and false are returned.
// May return the input labelSet modified.
func Process(lbls labels.Labels, cfgs ...*Config) labels.Labels {
lb := labels.NewBuilder(nil)
func Process(lbls labels.Labels, cfgs ...*Config) (ret labels.Labels, keep bool) {
lb := labels.NewBuilder(labels.EmptyLabels())
for _, cfg := range cfgs {
lbls = relabel(lbls, cfg, lb)
if lbls == nil {
return nil
lbls, keep = relabel(lbls, cfg, lb)
if !keep {
return labels.EmptyLabels(), false
}
}
return lbls
return lbls, true
}
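With the new signature, callers branch on the boolean instead of comparing against nil. A sketch of typical use; the Config literal is illustrative and omits defaults that YAML unmarshalling would normally fill in:

```go
package main

import (
	"fmt"

	"github.com/prometheus/common/model"

	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/model/relabel"
)

func main() {
	cfg := &relabel.Config{
		SourceLabels: model.LabelNames{"job"},
		Regex:        relabel.MustNewRegexp("api"),
		Action:       relabel.Drop,
	}

	lbls, keep := relabel.Process(labels.FromStrings("job", "api"), cfg)
	if !keep {
		fmt.Println("label set dropped") // Runs here: "api" matches the regex.
		return
	}
	fmt.Println(lbls)
}
```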
func relabel(lset labels.Labels, cfg *Config, lb *labels.Builder) labels.Labels {
func relabel(lset labels.Labels, cfg *Config, lb *labels.Builder) (ret labels.Labels, keep bool) {
var va [16]string
values := va[:0]
if len(cfg.SourceLabels) > cap(values) {
@ -232,19 +232,19 @@ func relabel(lset labels.Labels, cfg *Config, lb *labels.Builder) labels.Labels
switch cfg.Action {
case Drop:
if cfg.Regex.MatchString(val) {
return nil
return labels.EmptyLabels(), false
}
case Keep:
if !cfg.Regex.MatchString(val) {
return nil
return labels.EmptyLabels(), false
}
case DropEqual:
if lset.Get(cfg.TargetLabel) == val {
return nil
return labels.EmptyLabels(), false
}
case KeepEqual:
if lset.Get(cfg.TargetLabel) != val {
return nil
return labels.EmptyLabels(), false
}
case Replace:
indexes := cfg.Regex.FindStringSubmatchIndex(val)
@ -271,29 +271,29 @@ func relabel(lset labels.Labels, cfg *Config, lb *labels.Builder) labels.Labels
mod := sum64(md5.Sum([]byte(val))) % cfg.Modulus
lb.Set(cfg.TargetLabel, fmt.Sprintf("%d", mod))
case LabelMap:
for _, l := range lset {
lset.Range(func(l labels.Label) {
if cfg.Regex.MatchString(l.Name) {
res := cfg.Regex.ReplaceAllString(l.Name, cfg.Replacement)
lb.Set(res, l.Value)
}
}
})
case LabelDrop:
for _, l := range lset {
lset.Range(func(l labels.Label) {
if cfg.Regex.MatchString(l.Name) {
lb.Del(l.Name)
}
}
})
case LabelKeep:
for _, l := range lset {
lset.Range(func(l labels.Label) {
if !cfg.Regex.MatchString(l.Name) {
lb.Del(l.Name)
}
}
})
default:
panic(fmt.Errorf("relabel: unknown relabel action type %q", cfg.Action))
}
return lb.Labels(lset)
return lb.Labels(lset), true
}
// sum64 sums the md5 hash to an uint64.

View file

@ -28,6 +28,7 @@ func TestRelabel(t *testing.T) {
input labels.Labels
relabel []*Config
output labels.Labels
drop bool
}{
{
input: labels.FromMap(map[string]string{
@ -101,7 +102,7 @@ func TestRelabel(t *testing.T) {
Action: Replace,
},
},
output: nil,
drop: true,
},
{
input: labels.FromMap(map[string]string{
@ -115,7 +116,7 @@ func TestRelabel(t *testing.T) {
Action: Drop,
},
},
output: nil,
drop: true,
},
{
input: labels.FromMap(map[string]string{
@ -177,7 +178,7 @@ func TestRelabel(t *testing.T) {
Action: Keep,
},
},
output: nil,
drop: true,
},
{
input: labels.FromMap(map[string]string{
@ -483,7 +484,7 @@ func TestRelabel(t *testing.T) {
TargetLabel: "__port1",
},
},
output: nil,
drop: true,
},
{
input: labels.FromMap(map[string]string{
@ -517,7 +518,7 @@ func TestRelabel(t *testing.T) {
TargetLabel: "__port2",
},
},
output: nil,
drop: true,
},
}
@ -538,8 +539,11 @@ func TestRelabel(t *testing.T) {
}
}
res := Process(test.input, test.relabel...)
require.Equal(t, test.output, res)
res, keep := Process(test.input, test.relabel...)
require.Equal(t, !test.drop, keep)
if keep {
require.Equal(t, test.output, res)
}
}
}
@ -721,7 +725,7 @@ func BenchmarkRelabel(b *testing.B) {
for _, tt := range tests {
b.Run(tt.name, func(b *testing.B) {
for i := 0; i < b.N; i++ {
_ = Process(tt.lbls, tt.cfgs...)
_, _ = Process(tt.lbls, tt.cfgs...)
}
})
}

View file

@ -22,7 +22,6 @@ import (
"fmt"
"io"
"math"
"sort"
"strings"
"unicode/utf8"
@ -82,6 +81,7 @@ func (l *openMetricsLexer) Error(es string) {
// This is based on the working draft https://docs.google.com/document/u/1/d/1KwV0mAXwwbvvifBvDKH_LU1YjyXE_wxCkHNoCGq1GX0/edit
type OpenMetricsParser struct {
l *openMetricsLexer
builder labels.ScratchBuilder
series []byte
text []byte
mtype MetricType
@ -158,14 +158,11 @@ func (p *OpenMetricsParser) Comment() []byte {
// Metric writes the labels of the current sample into the passed labels.
// It returns the string from which the metric was parsed.
func (p *OpenMetricsParser) Metric(l *labels.Labels) string {
// Allocate the full immutable string immediately, so we just
// have to create references on it below.
// Copy the buffer to a string: this is only necessary for the return value.
s := string(p.series)
*l = append(*l, labels.Label{
Name: labels.MetricName,
Value: s[:p.offsets[0]-p.start],
})
p.builder.Reset()
p.builder.Add(labels.MetricName, s[:p.offsets[0]-p.start])
for i := 1; i < len(p.offsets); i += 4 {
a := p.offsets[i] - p.start
@ -173,16 +170,16 @@ func (p *OpenMetricsParser) Metric(l *labels.Labels) string {
c := p.offsets[i+2] - p.start
d := p.offsets[i+3] - p.start
value := s[c:d]
// Replacer causes allocations. Replace only when necessary.
if strings.IndexByte(s[c:d], byte('\\')) >= 0 {
*l = append(*l, labels.Label{Name: s[a:b], Value: lvalReplacer.Replace(s[c:d])})
continue
value = lvalReplacer.Replace(value)
}
*l = append(*l, labels.Label{Name: s[a:b], Value: s[c:d]})
p.builder.Add(s[a:b], value)
}
// Sort labels.
sort.Sort(*l)
p.builder.Sort()
*l = p.builder.Labels()
return s
}
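Callers are unaffected by the internal switch to ScratchBuilder; a sketch of typical parser use (the input string is illustrative):

```go
package main

import (
	"fmt"
	"io"

	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/model/textparse"
)

func main() {
	input := []byte("foo_total{job=\"api\"} 17.0 1520879607.789\n# EOF\n")
	p := textparse.NewOpenMetricsParser(input)
	for {
		entry, err := p.Next()
		if err == io.EOF {
			break
		} else if err != nil {
			panic(err)
		}
		if entry == textparse.EntrySeries {
			var lbls labels.Labels
			p.Metric(&lbls) // Filled via the parser's ScratchBuilder.
			fmt.Println(lbls)
		}
	}
}
```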
@ -204,17 +201,18 @@ func (p *OpenMetricsParser) Exemplar(e *exemplar.Exemplar) bool {
e.Ts = p.exemplarTs
}
p.builder.Reset()
for i := 0; i < len(p.eOffsets); i += 4 {
a := p.eOffsets[i] - p.start
b := p.eOffsets[i+1] - p.start
c := p.eOffsets[i+2] - p.start
d := p.eOffsets[i+3] - p.start
e.Labels = append(e.Labels, labels.Label{Name: s[a:b], Value: s[c:d]})
p.builder.Add(s[a:b], s[c:d])
}
// Sort the labels.
sort.Sort(e.Labels)
p.builder.Sort()
e.Labels = p.builder.Labels()
return true
}

View file

@ -246,7 +246,6 @@ foo_total 17.0 1520879607.789 # {xx="yy"} 5`
require.Equal(t, true, found)
require.Equal(t, *exp[i].e, e)
}
res = res[:0]
case EntryType:
m, typ := p.Type()
@ -595,7 +594,7 @@ func TestOMNullByteHandling(t *testing.T) {
},
{
input: "a{b\x00=\"hiih\"} 1",
err: "expected equal, got \"INVALID\"",
err: "expected equal, got \"INVALID\"",
},
{
input: "a\x00{b=\"ddd\"} 1",

View file

@ -21,7 +21,6 @@ import (
"fmt"
"io"
"math"
"sort"
"strconv"
"strings"
"unicode/utf8"
@ -144,6 +143,7 @@ func (l *promlexer) Error(es string) {
// Prometheus text exposition format.
type PromParser struct {
l *promlexer
builder labels.ScratchBuilder
series []byte
text []byte
mtype MetricType
@ -212,14 +212,11 @@ func (p *PromParser) Comment() []byte {
// Metric writes the labels of the current sample into the passed labels.
// It returns the string from which the metric was parsed.
func (p *PromParser) Metric(l *labels.Labels) string {
// Allocate the full immutable string immediately, so we just
// have to create references on it below.
// Copy the buffer to a string: this is only necessary for the return value.
s := string(p.series)
*l = append(*l, labels.Label{
Name: labels.MetricName,
Value: s[:p.offsets[0]-p.start],
})
p.builder.Reset()
p.builder.Add(labels.MetricName, s[:p.offsets[0]-p.start])
for i := 1; i < len(p.offsets); i += 4 {
a := p.offsets[i] - p.start
@ -227,16 +224,16 @@ func (p *PromParser) Metric(l *labels.Labels) string {
c := p.offsets[i+2] - p.start
d := p.offsets[i+3] - p.start
value := s[c:d]
// Replacer causes allocations. Replace only when necessary.
if strings.IndexByte(s[c:d], byte('\\')) >= 0 {
*l = append(*l, labels.Label{Name: s[a:b], Value: lvalReplacer.Replace(s[c:d])})
continue
value = lvalReplacer.Replace(value)
}
*l = append(*l, labels.Label{Name: s[a:b], Value: s[c:d]})
p.builder.Add(s[a:b], value)
}
// Sort labels to maintain the sorted labels invariant.
sort.Sort(*l)
p.builder.Sort()
*l = p.builder.Labels()
return s
}
@ -343,7 +340,7 @@ func (p *PromParser) Next() (Entry, error) {
t2 = p.nextToken()
}
if t2 != tValue {
return EntryInvalid, parseError("expected value after metric", t)
return EntryInvalid, parseError("expected value after metric", t2)
}
if p.val, err = parseFloat(yoloString(p.l.buf())); err != nil {
return EntryInvalid, err
@ -353,7 +350,7 @@ func (p *PromParser) Next() (Entry, error) {
p.val = math.Float64frombits(value.NormalNaN)
}
p.hasTS = false
switch p.nextToken() {
switch t := p.nextToken(); t {
case tLinebreak:
break
case tTimestamp:
@ -362,7 +359,7 @@ func (p *PromParser) Next() (Entry, error) {
return EntryInvalid, err
}
if t2 := p.nextToken(); t2 != tLinebreak {
return EntryInvalid, parseError("expected next entry after timestamp", t)
return EntryInvalid, parseError("expected next entry after timestamp", t2)
}
default:
return EntryInvalid, parseError("expected timestamp or new record", t)

View file

@ -192,7 +192,6 @@ testmetric{label="\"bar\""} 1`
require.Equal(t, exp[i].t, ts)
require.Equal(t, exp[i].v, v)
require.Equal(t, exp[i].lset, res)
res = res[:0]
case EntryType:
m, typ := p.Type()
@ -220,7 +219,7 @@ func TestPromParseErrors(t *testing.T) {
}{
{
input: "a",
err: "expected value after metric, got \"MNAME\"",
err: "expected value after metric, got \"INVALID\"",
},
{
input: "a{b='c'} 1\n",
@ -264,7 +263,7 @@ func TestPromParseErrors(t *testing.T) {
},
{
input: "foo 0 1_2\n",
err: "expected next entry after timestamp, got \"MNAME\"",
err: "expected next entry after timestamp, got \"INVALID\"",
},
{
input: `{a="ok"} 1`,
@ -322,11 +321,15 @@ func TestPromNullByteHandling(t *testing.T) {
},
{
input: "a{b\x00=\"hiih\"} 1",
err: "expected equal, got \"INVALID\"",
err: "expected equal, got \"INVALID\"",
},
{
input: "a\x00{b=\"ddd\"} 1",
err: "expected value after metric, got \"MNAME\"",
err: "expected value after metric, got \"INVALID\"",
},
{
input: "a 0 1\x00",
err: "expected next entry after timestamp, got \"INVALID\"",
},
}
@ -414,7 +417,7 @@ func BenchmarkParse(b *testing.B) {
case EntrySeries:
m, _, _ := p.Series()
res := make(labels.Labels, 0, 5)
var res labels.Labels
p.Metric(&res)
total += len(m)
@ -426,7 +429,7 @@ func BenchmarkParse(b *testing.B) {
})
b.Run(parserName+"/decode-metric-reuse/"+fn, func(b *testing.B) {
total := 0
res := make(labels.Labels, 0, 5)
var res labels.Labels
b.SetBytes(int64(len(buf) / promtestdataSampleCount))
b.ReportAllocs()
@ -451,7 +454,6 @@ func BenchmarkParse(b *testing.B) {
total += len(m)
i++
res = res[:0]
}
}
}

View file

@ -19,7 +19,6 @@ import (
"fmt"
"io"
"math"
"sort"
"strings"
"unicode/utf8"
@ -59,6 +58,8 @@ type ProtobufParser struct {
// that we have to decode the next MetricFamily.
state Entry
builder labels.ScratchBuilder // held here to reduce allocations when building Labels
mf *dto.MetricFamily
// The following are just shenanigans to satisfy the Parser interface.
@ -245,23 +246,19 @@ func (p *ProtobufParser) Comment() []byte {
// Metric writes the labels of the current sample into the passed labels.
// It returns the string from which the metric was parsed.
func (p *ProtobufParser) Metric(l *labels.Labels) string {
*l = append(*l, labels.Label{
Name: labels.MetricName,
Value: p.getMagicName(),
})
p.builder.Reset()
p.builder.Add(labels.MetricName, p.getMagicName())
for _, lp := range p.mf.GetMetric()[p.metricPos].GetLabel() {
*l = append(*l, labels.Label{
Name: lp.GetName(),
Value: lp.GetValue(),
})
p.builder.Add(lp.GetName(), lp.GetValue())
}
if needed, name, value := p.getMagicLabel(); needed {
*l = append(*l, labels.Label{Name: name, Value: value})
p.builder.Add(name, value)
}
// Sort labels to maintain the sorted labels invariant.
sort.Sort(*l)
p.builder.Sort()
*l = p.builder.Labels()
return p.metricBytes.String()
}
@ -305,12 +302,12 @@ func (p *ProtobufParser) Exemplar(ex *exemplar.Exemplar) bool {
ex.HasTs = true
ex.Ts = ts.GetSeconds()*1000 + int64(ts.GetNanos()/1_000_000)
}
p.builder.Reset()
for _, lp := range exProto.GetLabel() {
ex.Labels = append(ex.Labels, labels.Label{
Name: lp.GetName(),
Value: lp.GetValue(),
})
p.builder.Add(lp.GetName(), lp.GetValue())
}
p.builder.Sort()
ex.Labels = p.builder.Labels()
return true
}

View file

@ -630,7 +630,6 @@ metric: <
require.Equal(t, true, found)
require.Equal(t, exp[i].e[0], e)
}
res = res[:0]
case EntryHistogram:
m, ts, shs, fhs := p.Histogram()
@ -642,7 +641,6 @@ metric: <
require.Equal(t, exp[i].t, int64(0))
}
require.Equal(t, exp[i].lset, res)
res = res[:0]
require.Equal(t, exp[i].m, string(m))
if shs != nil {
require.Equal(t, exp[i].shs, shs)

View file

@ -19,11 +19,9 @@ import (
"encoding/json"
"fmt"
"io"
"net"
"net/http"
"net/url"
"path"
"strings"
"sync"
"time"
@ -355,11 +353,11 @@ func (n *Manager) Send(alerts ...*Alert) {
for _, a := range alerts {
lb := labels.NewBuilder(a.Labels)
for _, l := range n.opts.ExternalLabels {
n.opts.ExternalLabels.Range(func(l labels.Label) {
if a.Labels.Get(l.Name) == "" {
lb.Set(l.Name, l.Value)
}
}
})
a.Labels = lb.Labels(a.Labels)
}
@ -396,8 +394,8 @@ func (n *Manager) relabelAlerts(alerts []*Alert) []*Alert {
var relabeledAlerts []*Alert
for _, alert := range alerts {
labels := relabel.Process(alert.Labels, n.opts.RelabelConfigs...)
if labels != nil {
labels, keep := relabel.Process(alert.Labels, n.opts.RelabelConfigs...)
if keep {
alert.Labels = labels
relabeledAlerts = append(relabeledAlerts, alert)
}
@ -572,9 +570,9 @@ func alertsToOpenAPIAlerts(alerts []*Alert) models.PostableAlerts {
func labelsToOpenAPILabelSet(modelLabelSet labels.Labels) models.LabelSet {
apiLabelSet := models.LabelSet{}
for _, label := range modelLabelSet {
modelLabelSet.Range(func(label labels.Label) {
apiLabelSet[label.Name] = label.Value
}
})
return apiLabelSet
}
@ -721,53 +719,17 @@ func AlertmanagerFromGroup(tg *targetgroup.Group, cfg *config.AlertmanagerConfig
}
}
lset := relabel.Process(labels.New(lbls...), cfg.RelabelConfigs...)
if lset == nil {
droppedAlertManagers = append(droppedAlertManagers, alertmanagerLabels{lbls})
lset, keep := relabel.Process(labels.New(lbls...), cfg.RelabelConfigs...)
if !keep {
droppedAlertManagers = append(droppedAlertManagers, alertmanagerLabels{labels.New(lbls...)})
continue
}
lb := labels.NewBuilder(lset)
// addPort checks whether we should add a default port to the address.
// If the address is not valid, we don't append a port either.
addPort := func(s string) bool {
// If we can split, a port exists and we don't have to add one.
if _, _, err := net.SplitHostPort(s); err == nil {
return false
}
// If adding a port makes it valid, the previous error
// was not due to an invalid address and we can append a port.
_, _, err := net.SplitHostPort(s + ":1234")
return err == nil
}
addr := lset.Get(model.AddressLabel)
// If it's an address with no trailing port, infer it based on the used scheme.
if addPort(addr) {
// Addresses reaching this point are already wrapped in [] if necessary.
switch lset.Get(model.SchemeLabel) {
case "http", "":
addr = addr + ":80"
case "https":
addr = addr + ":443"
default:
return nil, nil, fmt.Errorf("invalid scheme: %q", cfg.Scheme)
}
lb.Set(model.AddressLabel, addr)
}
if err := config.CheckTargetAddress(model.LabelValue(addr)); err != nil {
return nil, nil, err
}
// Meta labels are deleted after relabelling. Other internal labels propagate to
// the target which decides whether they will be part of their label set.
for _, l := range lset {
if strings.HasPrefix(l.Name, model.MetaLabelPrefix) {
lb.Del(l.Name)
}
}
res = append(res, alertmanagerLabels{lset})
}
return res, droppedAlertManagers, nil

View file

@ -27,17 +27,7 @@ import (
"github.com/prometheus/prometheus/util/teststorage"
)
func BenchmarkRangeQuery(b *testing.B) {
stor := teststorage.New(b)
defer stor.Close()
opts := EngineOpts{
Logger: nil,
Reg: nil,
MaxSamples: 50000000,
Timeout: 100 * time.Second,
}
engine := NewEngine(opts)
func setupRangeQueryTestData(stor *teststorage.TestStorage, engine *Engine, interval, numIntervals int) error {
metrics := []labels.Labels{}
metrics = append(metrics, labels.FromStrings("__name__", "a_one"))
metrics = append(metrics, labels.FromStrings("__name__", "b_one"))
@ -65,25 +55,26 @@ func BenchmarkRangeQuery(b *testing.B) {
}
refs := make([]storage.SeriesRef, len(metrics))
// A day of data plus 10k steps.
numIntervals := 8640 + 10000
for s := 0; s < numIntervals; s++ {
a := stor.Appender(context.Background())
ts := int64(s * 10000) // 10s interval.
ts := int64(s * interval)
for i, metric := range metrics {
ref, _ := a.Append(refs[i], metric, ts, float64(s)+float64(i)/float64(len(metrics)))
refs[i] = ref
}
if err := a.Commit(); err != nil {
b.Fatal(err)
return err
}
}
return nil
}
type benchCase struct {
expr string
steps int
}
type benchCase struct {
expr string
steps int
}
func rangeQueryCases() []benchCase {
cases := []benchCase{
// Plain retrieval.
{
@ -210,7 +201,30 @@ func BenchmarkRangeQuery(b *testing.B) {
tmp = append(tmp, benchCase{expr: c.expr, steps: 1000})
}
}
cases = tmp
return tmp
}
func BenchmarkRangeQuery(b *testing.B) {
stor := teststorage.New(b)
defer stor.Close()
opts := EngineOpts{
Logger: nil,
Reg: nil,
MaxSamples: 50000000,
Timeout: 100 * time.Second,
}
engine := NewEngine(opts)
const interval = 10000 // 10s interval.
// A day of data plus 10k steps.
numIntervals := 8640 + 10000
err := setupRangeQueryTestData(stor, engine, interval, numIntervals)
if err != nil {
b.Fatal(err)
}
cases := rangeQueryCases()
for _, c := range cases {
name := fmt.Sprintf("expr=%s,steps=%d", c.expr, c.steps)
b.Run(name, func(b *testing.B) {

View file

@ -653,12 +653,13 @@ func (ng *Engine) execEvalStmt(ctx context.Context, query *query, s *parser.Eval
query.sampleStats.InitStepTracking(start, start, 1)
val, warnings, err := evaluator.Eval(s.Expr)
evalSpanTimer.Finish()
if err != nil {
return nil, warnings, err
}
evalSpanTimer.Finish()
var mat Matrix
switch result := val.(type) {
@ -704,10 +705,12 @@ func (ng *Engine) execEvalStmt(ctx context.Context, query *query, s *parser.Eval
}
query.sampleStats.InitStepTracking(evaluator.startTimestamp, evaluator.endTimestamp, evaluator.interval)
val, warnings, err := evaluator.Eval(s.Expr)
evalSpanTimer.Finish()
if err != nil {
return nil, warnings, err
}
evalSpanTimer.Finish()
mat, ok := val.(Matrix)
if !ok {
@ -1029,6 +1032,14 @@ type EvalNodeHelper struct {
resultMetric map[string]labels.Labels
}
func (enh *EvalNodeHelper) resetBuilder(lbls labels.Labels) {
if enh.lb == nil {
enh.lb = labels.NewBuilder(lbls)
} else {
enh.lb.Reset(lbls)
}
}
// DropMetricName is a cached version of DropMetricName.
func (enh *EvalNodeHelper) DropMetricName(l labels.Labels) labels.Labels {
if enh.Dmn == nil {
@ -1390,10 +1401,12 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) {
enh := &EvalNodeHelper{Out: make(Vector, 0, 1)}
// Process all the calls for one time series at a time.
it := storage.NewBuffer(selRange)
var chkIter chunkenc.Iterator
for i, s := range selVS.Series {
ev.currentSamples -= len(points)
points = points[:0]
it.Reset(s.Iterator())
chkIter = s.Iterator(chkIter)
it.Reset(chkIter)
metric := selVS.Series[i].Labels()
// The last_over_time function acts like offset; thus, it
// should keep the metric name. For all the other range
@ -1562,7 +1575,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) {
case *parser.NumberLiteral:
return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) {
return append(enh.Out, Sample{Point: Point{V: e.Val}}), nil
return append(enh.Out, Sample{Point: Point{V: e.Val}, Metric: labels.EmptyLabels()}), nil
})
case *parser.StringLiteral:
@ -1575,8 +1588,10 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) {
}
mat := make(Matrix, 0, len(e.Series))
it := storage.NewMemoizedEmptyIterator(durationMilliseconds(ev.lookbackDelta))
var chkIter chunkenc.Iterator
for i, s := range e.Series {
it.Reset(s.Iterator())
chkIter = s.Iterator(chkIter)
it.Reset(chkIter)
ss := Series{
Metric: e.Series[i].Labels(),
Points: getPointSlice(numSteps),
@ -1720,8 +1735,10 @@ func (ev *evaluator) vectorSelector(node *parser.VectorSelector, ts int64) (Vect
}
vec := make(Vector, 0, len(node.Series))
it := storage.NewMemoizedEmptyIterator(durationMilliseconds(ev.lookbackDelta))
var chkIter chunkenc.Iterator
for i, s := range node.Series {
it.Reset(s.Iterator())
chkIter = s.Iterator(chkIter)
it.Reset(chkIter)
t, v, h, ok := ev.vectorSelectorSingle(it, node, ts)
if ok {
@ -1809,12 +1826,14 @@ func (ev *evaluator) matrixSelector(node *parser.MatrixSelector) (Matrix, storag
ev.error(errWithWarnings{fmt.Errorf("expanding series: %w", err), ws})
}
var chkIter chunkenc.Iterator
series := vs.Series
for i, s := range series {
if err := contextDone(ev.ctx, "expression evaluation"); err != nil {
ev.error(err)
}
it.Reset(s.Iterator())
chkIter = s.Iterator(chkIter)
it.Reset(chkIter)
ss := Series{
Metric: series[i].Labels(),
}
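The recurring pattern above, `chkIter = s.Iterator(chkIter)`, hands the previous iterator back to the series so the implementation can recycle it instead of allocating a new one per series. A schematic sketch of the idea with simplified stand-in types (not the actual storage interfaces):

```go
package main

// chunkIterator stands in for chunkenc.Iterator.
type chunkIterator struct{ pos int }

// series stands in for storage.Series.
type series struct{}

// Iterator recycles the iterator passed in, mirroring the
// `chkIter = s.Iterator(chkIter)` calls in the evaluator.
func (s series) Iterator(it *chunkIterator) *chunkIterator {
	if it != nil {
		*it = chunkIterator{} // Reset in place; no allocation.
		return it
	}
	return &chunkIterator{}
}

func main() {
	var it *chunkIterator
	for _, s := range []series{{}, {}, {}} {
		it = s.Iterator(it) // First pass allocates; later passes reuse.
	}
	_ = it
}
```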
@ -2141,12 +2160,7 @@ func resultMetric(lhs, rhs labels.Labels, op parser.ItemType, matching *parser.V
enh.resultMetric = make(map[string]labels.Labels, len(enh.Out))
}
if enh.lb == nil {
enh.lb = labels.NewBuilder(lhs)
} else {
enh.lb.Reset(lhs)
}
enh.resetBuilder(lhs)
buf := bytes.NewBuffer(enh.lblResultBuf[:0])
enh.lblBuf = lhs.Bytes(enh.lblBuf)
buf.Write(enh.lblBuf)
@ -2179,7 +2193,7 @@ func resultMetric(lhs, rhs labels.Labels, op parser.ItemType, matching *parser.V
}
}
ret := enh.lb.Labels(nil)
ret := enh.lb.Labels(labels.EmptyLabels())
enh.resultMetric[str] = ret
return ret
}
@ -2219,7 +2233,7 @@ func (ev *evaluator) VectorscalarBinop(op parser.ItemType, lhs Vector, rhs Scala
}
func dropMetricName(l labels.Labels) labels.Labels {
return labels.NewBuilder(l).Del(labels.MetricName).Labels(nil)
return labels.NewBuilder(l).Del(labels.MetricName).Labels(labels.EmptyLabels())
}
// scalarBinop evaluates a binary operation between two Scalars.
@ -2346,15 +2360,14 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without
}
}
lb := labels.NewBuilder(nil)
var buf []byte
for si, s := range vec {
metric := s.Metric
if op == parser.COUNT_VALUES {
lb.Reset(metric)
lb.Set(valueLabel, strconv.FormatFloat(s.V, 'f', -1, 64))
metric = lb.Labels(nil)
enh.resetBuilder(metric)
enh.lb.Set(valueLabel, strconv.FormatFloat(s.V, 'f', -1, 64))
metric = enh.lb.Labels(labels.EmptyLabels())
// We've changed the metric so we have to recompute the grouping key.
recomputeGroupingKey = true
@ -2371,14 +2384,18 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without
group, ok := result[groupingKey]
// Add a new group if it doesn't exist.
if !ok {
lb.Reset(metric)
var m labels.Labels
enh.resetBuilder(metric)
if without {
lb.Del(grouping...)
lb.Del(labels.MetricName)
enh.lb.Del(grouping...)
enh.lb.Del(labels.MetricName)
m = enh.lb.Labels(labels.EmptyLabels())
} else if len(grouping) > 0 {
enh.lb.Keep(grouping...)
m = enh.lb.Labels(labels.EmptyLabels())
} else {
lb.Keep(grouping...)
m = labels.EmptyLabels()
}
m := lb.Labels(nil)
newAgg := &groupedAggregation{
labels: m,
value: s.V,

View file

@ -684,6 +684,7 @@ load 10s
Result: Matrix{
Series{
Points: []Point{{V: 1, T: 0}, {V: 1, T: 1000}, {V: 1, T: 2000}},
Metric: labels.EmptyLabels(),
},
},
Start: time.Unix(0, 0),
@ -3162,7 +3163,48 @@ func TestSparseHistogramRate(t *testing.T) {
app := test.Storage().Appender(context.TODO())
for i, h := range tsdb.GenerateTestHistograms(100) {
_, err := app.AppendHistogram(0, lbls, int64(i)*int64(15*time.Second/time.Millisecond), h)
_, err := app.AppendHistogram(0, lbls, int64(i)*int64(15*time.Second/time.Millisecond), h, nil)
require.NoError(t, err)
}
require.NoError(t, app.Commit())
require.NoError(t, test.Run())
engine := test.QueryEngine()
queryString := fmt.Sprintf("rate(%s[1m])", seriesName)
qry, err := engine.NewInstantQuery(test.Queryable(), nil, queryString, timestamp.Time(int64(5*time.Minute/time.Millisecond)))
require.NoError(t, err)
res := qry.Exec(test.Context())
require.NoError(t, res.Err)
vector, err := res.Vector()
require.NoError(t, err)
require.Len(t, vector, 1)
actualHistogram := vector[0].H
expectedHistogram := &histogram.FloatHistogram{
Schema: 1,
ZeroThreshold: 0.001,
ZeroCount: 1. / 15.,
Count: 4. / 15.,
Sum: 1.226666666666667,
PositiveSpans: []histogram.Span{{Offset: 0, Length: 2}, {Offset: 1, Length: 2}},
PositiveBuckets: []float64{1. / 15., 1. / 15., 1. / 15., 1. / 15.},
}
require.Equal(t, expectedHistogram, actualHistogram)
}
func TestSparseFloatHistogramRate(t *testing.T) {
// TODO(beorn7): Integrate histograms into the PromQL testing framework
// and write more tests there.
test, err := NewTest(t, "")
require.NoError(t, err)
defer test.Close()
seriesName := "sparse_histogram_series"
lbls := labels.FromStrings("__name__", seriesName)
app := test.Storage().Appender(context.TODO())
for i, fh := range tsdb.GenerateTestFloatHistograms(100) {
_, err := app.AppendHistogram(0, lbls, int64(i)*int64(15*time.Second/time.Millisecond), nil, fh)
require.NoError(t, err)
}
require.NoError(t, app.Commit())
@@ -3211,48 +3253,63 @@ func TestSparseHistogram_HistogramCountAndSum(t *testing.T) {
},
NegativeBuckets: []int64{2, 1, -2, 3},
}
for _, floatHisto := range []bool{true, false} {
t.Run(fmt.Sprintf("floatHistogram=%t", floatHisto), func(t *testing.T) {
test, err := NewTest(t, "")
require.NoError(t, err)
t.Cleanup(test.Close)
test, err := NewTest(t, "")
require.NoError(t, err)
t.Cleanup(test.Close)
seriesName := "sparse_histogram_series"
lbls := labels.FromStrings("__name__", seriesName)
engine := test.QueryEngine()
seriesName := "sparse_histogram_series"
lbls := labels.FromStrings("__name__", seriesName)
engine := test.QueryEngine()
ts := int64(10 * time.Minute / time.Millisecond)
app := test.Storage().Appender(context.TODO())
if floatHisto {
_, err = app.AppendHistogram(0, lbls, ts, nil, h.ToFloat())
} else {
_, err = app.AppendHistogram(0, lbls, ts, h, nil)
}
require.NoError(t, err)
require.NoError(t, app.Commit())
ts := int64(10 * time.Minute / time.Millisecond)
app := test.Storage().Appender(context.TODO())
_, err = app.AppendHistogram(0, lbls, ts, h)
require.NoError(t, err)
require.NoError(t, app.Commit())
queryString := fmt.Sprintf("histogram_count(%s)", seriesName)
qry, err := engine.NewInstantQuery(test.Queryable(), nil, queryString, timestamp.Time(ts))
require.NoError(t, err)
queryString := fmt.Sprintf("histogram_count(%s)", seriesName)
qry, err := engine.NewInstantQuery(test.Queryable(), nil, queryString, timestamp.Time(ts))
require.NoError(t, err)
res := qry.Exec(test.Context())
require.NoError(t, res.Err)
res := qry.Exec(test.Context())
require.NoError(t, res.Err)
vector, err := res.Vector()
require.NoError(t, err)
vector, err := res.Vector()
require.NoError(t, err)
require.Len(t, vector, 1)
require.Nil(t, vector[0].H)
if floatHisto {
require.Equal(t, float64(h.ToFloat().Count), vector[0].V)
} else {
require.Equal(t, float64(h.Count), vector[0].V)
}
require.Len(t, vector, 1)
require.Nil(t, vector[0].H)
require.Equal(t, float64(h.Count), vector[0].V)
queryString = fmt.Sprintf("histogram_sum(%s)", seriesName)
qry, err = engine.NewInstantQuery(test.Queryable(), nil, queryString, timestamp.Time(ts))
require.NoError(t, err)
queryString = fmt.Sprintf("histogram_sum(%s)", seriesName)
qry, err = engine.NewInstantQuery(test.Queryable(), nil, queryString, timestamp.Time(ts))
require.NoError(t, err)
res = qry.Exec(test.Context())
require.NoError(t, res.Err)
res = qry.Exec(test.Context())
require.NoError(t, res.Err)
vector, err = res.Vector()
require.NoError(t, err)
vector, err = res.Vector()
require.NoError(t, err)
require.Len(t, vector, 1)
require.Nil(t, vector[0].H)
require.Equal(t, h.Sum, vector[0].V)
require.Len(t, vector, 1)
require.Nil(t, vector[0].H)
if floatHisto {
require.Equal(t, h.ToFloat().Sum, vector[0].V)
} else {
require.Equal(t, h.Sum, vector[0].V)
}
})
}
}
func TestSparseHistogram_HistogramQuantile(t *testing.T) {
@@ -3451,36 +3508,43 @@ func TestSparseHistogram_HistogramQuantile(t *testing.T) {
test, err := NewTest(t, "")
require.NoError(t, err)
t.Cleanup(test.Close)
for i, c := range cases {
t.Run(c.text, func(t *testing.T) {
seriesName := "sparse_histogram_series"
lbls := labels.FromStrings("__name__", seriesName)
engine := test.QueryEngine()
idx := int64(0)
for _, floatHisto := range []bool{true, false} {
for _, c := range cases {
t.Run(fmt.Sprintf("%s floatHistogram=%t", c.text, floatHisto), func(t *testing.T) {
seriesName := "sparse_histogram_series"
lbls := labels.FromStrings("__name__", seriesName)
engine := test.QueryEngine()
ts := idx * int64(10*time.Minute/time.Millisecond)
app := test.Storage().Appender(context.TODO())
if floatHisto {
_, err = app.AppendHistogram(0, lbls, ts, nil, c.h.ToFloat())
} else {
_, err = app.AppendHistogram(0, lbls, ts, c.h, nil)
}
require.NoError(t, err)
require.NoError(t, app.Commit())
ts := int64(i+1) * int64(10*time.Minute/time.Millisecond)
app := test.Storage().Appender(context.TODO())
_, err = app.AppendHistogram(0, lbls, ts, c.h)
require.NoError(t, err)
require.NoError(t, app.Commit())
for j, sc := range c.subCases {
t.Run(fmt.Sprintf("%d %s", j, sc.quantile), func(t *testing.T) {
queryString := fmt.Sprintf("histogram_quantile(%s, %s)", sc.quantile, seriesName)
qry, err := engine.NewInstantQuery(test.Queryable(), nil, queryString, timestamp.Time(ts))
require.NoError(t, err)
for j, sc := range c.subCases {
t.Run(fmt.Sprintf("%d %s", j, sc.quantile), func(t *testing.T) {
queryString := fmt.Sprintf("histogram_quantile(%s, %s)", sc.quantile, seriesName)
qry, err := engine.NewInstantQuery(test.Queryable(), nil, queryString, timestamp.Time(ts))
require.NoError(t, err)
res := qry.Exec(test.Context())
require.NoError(t, res.Err)
res := qry.Exec(test.Context())
require.NoError(t, res.Err)
vector, err := res.Vector()
require.NoError(t, err)
vector, err := res.Vector()
require.NoError(t, err)
require.Len(t, vector, 1)
require.Nil(t, vector[0].H)
require.True(t, almostEqual(sc.value, vector[0].V))
})
}
})
require.Len(t, vector, 1)
require.Nil(t, vector[0].H)
require.True(t, almostEqual(sc.value, vector[0].V))
})
}
idx++
})
}
}
}
@@ -3870,45 +3934,52 @@ func TestSparseHistogram_HistogramFraction(t *testing.T) {
}, invariantCases...),
},
}
idx := int64(0)
for _, floatHisto := range []bool{true, false} {
for _, c := range cases {
t.Run(fmt.Sprintf("%s floatHistogram=%t", c.text, floatHisto), func(t *testing.T) {
test, err := NewTest(t, "")
require.NoError(t, err)
t.Cleanup(test.Close)
for i, c := range cases {
t.Run(c.text, func(t *testing.T) {
test, err := NewTest(t, "")
require.NoError(t, err)
t.Cleanup(test.Close)
seriesName := "sparse_histogram_series"
lbls := labels.FromStrings("__name__", seriesName)
engine := test.QueryEngine()
seriesName := "sparse_histogram_series"
lbls := labels.FromStrings("__name__", seriesName)
engine := test.QueryEngine()
ts := idx * int64(10*time.Minute/time.Millisecond)
app := test.Storage().Appender(context.TODO())
if floatHisto {
_, err = app.AppendHistogram(0, lbls, ts, nil, c.h.ToFloat())
} else {
_, err = app.AppendHistogram(0, lbls, ts, c.h, nil)
}
require.NoError(t, err)
require.NoError(t, app.Commit())
ts := int64(i+1) * int64(10*time.Minute/time.Millisecond)
app := test.Storage().Appender(context.TODO())
_, err = app.AppendHistogram(0, lbls, ts, c.h)
require.NoError(t, err)
require.NoError(t, app.Commit())
for j, sc := range c.subCases {
t.Run(fmt.Sprintf("%d %s %s", j, sc.lower, sc.upper), func(t *testing.T) {
queryString := fmt.Sprintf("histogram_fraction(%s, %s, %s)", sc.lower, sc.upper, seriesName)
qry, err := engine.NewInstantQuery(test.Queryable(), nil, queryString, timestamp.Time(ts))
require.NoError(t, err)
for j, sc := range c.subCases {
t.Run(fmt.Sprintf("%d %s %s", j, sc.lower, sc.upper), func(t *testing.T) {
queryString := fmt.Sprintf("histogram_fraction(%s, %s, %s)", sc.lower, sc.upper, seriesName)
qry, err := engine.NewInstantQuery(test.Queryable(), nil, queryString, timestamp.Time(ts))
require.NoError(t, err)
res := qry.Exec(test.Context())
require.NoError(t, res.Err)
res := qry.Exec(test.Context())
require.NoError(t, res.Err)
vector, err := res.Vector()
require.NoError(t, err)
vector, err := res.Vector()
require.NoError(t, err)
require.Len(t, vector, 1)
require.Nil(t, vector[0].H)
if math.IsNaN(sc.value) {
require.True(t, math.IsNaN(vector[0].V))
return
}
require.Equal(t, sc.value, vector[0].V)
})
}
})
require.Len(t, vector, 1)
require.Nil(t, vector[0].H)
if math.IsNaN(sc.value) {
require.True(t, math.IsNaN(vector[0].V))
return
}
require.Equal(t, sc.value, vector[0].V)
})
}
idx++
})
}
}
}
@@ -3998,60 +4069,68 @@ func TestSparseHistogram_Sum_Count_AddOperator(t *testing.T) {
},
}
for i, c := range cases {
t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
test, err := NewTest(t, "")
require.NoError(t, err)
t.Cleanup(test.Close)
seriesName := "sparse_histogram_series"
engine := test.QueryEngine()
ts := int64(i+1) * int64(10*time.Minute/time.Millisecond)
app := test.Storage().Appender(context.TODO())
for idx, h := range c.histograms {
lbls := labels.FromStrings("__name__", seriesName, "idx", fmt.Sprintf("%d", idx))
// Since we mutate h later, we need to create a copy here.
_, err = app.AppendHistogram(0, lbls, ts, h.Copy())
idx0 := int64(0)
for _, c := range cases {
for _, floatHisto := range []bool{true, false} {
t.Run(fmt.Sprintf("floatHistogram=%t %d", floatHisto, idx0), func(t *testing.T) {
test, err := NewTest(t, "")
require.NoError(t, err)
}
require.NoError(t, app.Commit())
t.Cleanup(test.Close)
queryAndCheck := func(queryString string, exp Vector) {
qry, err := engine.NewInstantQuery(test.Queryable(), nil, queryString, timestamp.Time(ts))
require.NoError(t, err)
seriesName := "sparse_histogram_series"
res := qry.Exec(test.Context())
require.NoError(t, res.Err)
engine := test.QueryEngine()
vector, err := res.Vector()
require.NoError(t, err)
ts := idx0 * int64(10*time.Minute/time.Millisecond)
app := test.Storage().Appender(context.TODO())
for idx1, h := range c.histograms {
lbls := labels.FromStrings("__name__", seriesName, "idx", fmt.Sprintf("%d", idx1))
// Since we mutate h later, we need to create a copy here.
if floatHisto {
_, err = app.AppendHistogram(0, lbls, ts, nil, h.Copy().ToFloat())
} else {
_, err = app.AppendHistogram(0, lbls, ts, h.Copy(), nil)
}
require.NoError(t, err)
}
require.NoError(t, app.Commit())
require.Equal(t, exp, vector)
}
queryAndCheck := func(queryString string, exp Vector) {
qry, err := engine.NewInstantQuery(test.Queryable(), nil, queryString, timestamp.Time(ts))
require.NoError(t, err)
// sum().
queryString := fmt.Sprintf("sum(%s)", seriesName)
queryAndCheck(queryString, []Sample{
{Point{T: ts, H: &c.expected}, labels.Labels{}},
res := qry.Exec(test.Context())
require.NoError(t, res.Err)
vector, err := res.Vector()
require.NoError(t, err)
require.Equal(t, exp, vector)
}
// sum().
queryString := fmt.Sprintf("sum(%s)", seriesName)
queryAndCheck(queryString, []Sample{
{Point{T: ts, H: &c.expected}, labels.EmptyLabels()},
})
// + operator.
queryString = fmt.Sprintf(`%s{idx="0"}`, seriesName)
for idx := 1; idx < len(c.histograms); idx++ {
queryString += fmt.Sprintf(` + ignoring(idx) %s{idx="%d"}`, seriesName, idx)
}
queryAndCheck(queryString, []Sample{
{Point{T: ts, H: &c.expected}, labels.EmptyLabels()},
})
// count().
queryString = fmt.Sprintf("count(%s)", seriesName)
queryAndCheck(queryString, []Sample{
{Point{T: ts, V: 3}, labels.EmptyLabels()},
})
})
// + operator.
queryString = fmt.Sprintf(`%s{idx="0"}`, seriesName)
for idx := 1; idx < len(c.histograms); idx++ {
queryString += fmt.Sprintf(` + ignoring(idx) %s{idx="%d"}`, seriesName, idx)
}
queryAndCheck(queryString, []Sample{
{Point{T: ts, H: &c.expected}, labels.Labels{}},
})
// count().
queryString = fmt.Sprintf("count(%s)", seriesName)
queryAndCheck(queryString, []Sample{
{Point{T: ts, V: 3}, labels.Labels{}},
})
})
idx0++
}
}
}

View file

@@ -302,7 +302,7 @@ func funcHoltWinters(vals []parser.Value, args parser.Expressions, enh *EvalNode
// The trend factor argument.
tf := vals[2].(Vector)[0].V
// Sanity check the input.
// Check that the input parameters are valid.
if sf <= 0 || sf >= 1 {
panic(fmt.Errorf("invalid smoothing factor. Expected: 0 < sf < 1, got: %f", sf))
}
@@ -957,7 +957,7 @@ func funcHistogramQuantile(vals []parser.Value, args parser.Expressions, enh *Ev
if !ok {
sample.Metric = labels.NewBuilder(sample.Metric).
Del(excludedLabels...).
Labels(nil)
Labels(labels.EmptyLabels())
mb = &metricWithBuckets{sample.Metric, nil}
enh.signatureToMetricWithBuckets[string(enh.lblBuf)] = mb
@@ -1077,7 +1077,7 @@ func funcLabelReplace(vals []parser.Value, args parser.Expressions, enh *EvalNod
if len(res) > 0 {
lb.Set(dst, string(res))
}
outMetric = lb.Labels(nil)
outMetric = lb.Labels(labels.EmptyLabels())
enh.Dmn[h] = outMetric
}
}
@@ -1145,7 +1145,7 @@ func funcLabelJoin(vals []parser.Value, args parser.Expressions, enh *EvalNodeHe
lb.Set(dst, strval)
}
outMetric = lb.Labels(nil)
outMetric = lb.Labels(labels.EmptyLabels())
enh.Dmn[h] = outMetric
}
@@ -1383,7 +1383,7 @@ func (s *vectorByReverseValueHeap) Pop() interface{} {
// createLabelsForAbsentFunction returns the labels that are uniquely and exactly matched
// in a given expression. It is used in the absent functions.
func createLabelsForAbsentFunction(expr parser.Expr) labels.Labels {
m := labels.Labels{}
b := labels.NewBuilder(labels.EmptyLabels())
var lm []*labels.Matcher
switch n := expr.(type) {
@@ -1392,25 +1392,26 @@ func createLabelsForAbsentFunction(expr parser.Expr) labels.Labels {
case *parser.MatrixSelector:
lm = n.VectorSelector.(*parser.VectorSelector).LabelMatchers
default:
return m
return labels.EmptyLabels()
}
empty := []string{}
// The 'has' map implements backwards-compatibility for historic behaviour:
// e.g. in `absent(x{job="a",job="b",foo="bar"})` then `job` is removed from the output.
// Note this gives arguably wrong behaviour for `absent(x{job="a",job="a",foo="bar"})`.
has := make(map[string]bool, len(lm))
for _, ma := range lm {
if ma.Name == labels.MetricName {
continue
}
if ma.Type == labels.MatchEqual && !m.Has(ma.Name) {
m = labels.NewBuilder(m).Set(ma.Name, ma.Value).Labels(nil)
if ma.Type == labels.MatchEqual && !has[ma.Name] {
b.Set(ma.Name, ma.Value)
has[ma.Name] = true
} else {
empty = append(empty, ma.Name)
b.Del(ma.Name)
}
}
for _, v := range empty {
m = labels.NewBuilder(m).Del(v).Labels(nil)
}
return m
return b.Labels(labels.EmptyLabels())
}
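
A worked example of the behaviour the comment describes: absent(x{job="a",foo="bar"}) produces {foo="bar", job="a"}, while absent(x{job="a",job="b",foo="bar"}) produces only {foo="bar"}, because the repeated job matcher takes the else branch and deletes the name from the builder again.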
func stringFromArg(e parser.Expr) string {

View file

@@ -16,13 +16,13 @@ package parser
import (
"math"
"sort"
"strconv"
"time"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/value"
)
%}
%union {
@@ -32,6 +32,7 @@ import (
matcher *labels.Matcher
label labels.Label
labels labels.Labels
lblList []labels.Label
strings []string
series []SequenceValue
uint uint64
@@ -138,10 +139,9 @@ START_METRIC_SELECTOR
// Type definitions for grammar rules.
%type <matchers> label_match_list
%type <matcher> label_matcher
%type <item> aggregate_op grouping_label match_op maybe_label metric_identifier unary_op at_modifier_preprocessors
%type <labels> label_set label_set_list metric
%type <labels> label_set metric
%type <lblList> label_set_list
%type <label> label_set_item
%type <strings> grouping_label_list grouping_labels maybe_grouping_labels
%type <series> series_item series_values
@@ -567,7 +567,7 @@ label_matcher : IDENTIFIER match_op STRING
*/
metric : metric_identifier label_set
{ $$ = append($2, labels.Label{Name: labels.MetricName, Value: $1.Val}); sort.Sort($$) }
{ b := labels.NewBuilder($2); b.Set(labels.MetricName, $1.Val); $$ = b.Labels(labels.EmptyLabels()) }
| label_set
{$$ = $1}
;

View file

@@ -9,7 +9,6 @@ import __yyfmt__ "fmt"
import (
"math"
"sort"
"strconv"
"time"
@@ -26,6 +25,7 @@ type yySymType struct {
matcher *labels.Matcher
label labels.Label
labels labels.Labels
lblList []labels.Label
strings []string
series []SequenceValue
uint uint64
@@ -458,7 +458,7 @@ var yyPact = [...]int{
var yyPgo = [...]int{
0, 267, 7, 265, 2, 264, 262, 164, 261, 257,
115, 253, 181, 8, 252, 4, 5, 251, 250, 0,
115, 181, 253, 8, 252, 4, 5, 251, 250, 0,
23, 248, 6, 247, 246, 245, 10, 64, 244, 239,
1, 231, 230, 9, 217, 21, 214, 213, 205, 201,
198, 196, 189, 188, 206, 3, 180, 165, 127,
@@ -474,10 +474,10 @@ var yyR1 = [...]int{
31, 33, 33, 32, 32, 32, 40, 38, 38, 38,
24, 24, 24, 9, 9, 36, 42, 42, 42, 42,
42, 43, 44, 44, 44, 35, 35, 35, 1, 1,
1, 2, 2, 2, 2, 12, 12, 7, 7, 7,
1, 2, 2, 2, 2, 11, 11, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 10,
10, 10, 10, 11, 11, 11, 13, 13, 13, 13,
10, 10, 10, 12, 12, 12, 13, 13, 13, 13,
48, 18, 18, 18, 18, 17, 17, 17, 17, 17,
21, 21, 21, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 6, 6, 6, 6, 6,
@@ -513,14 +513,14 @@ var yyR2 = [...]int{
}
var yyChk = [...]int{
-1000, -47, 75, 76, 77, 78, 2, 10, -12, -7,
-1000, -47, 75, 76, 77, 78, 2, 10, -11, -7,
-10, 47, 48, 62, 49, 50, 51, 12, 32, 33,
36, 52, 16, 53, 66, 54, 55, 56, 57, 58,
68, 71, 72, 13, -48, -12, 10, -30, -25, -28,
68, 71, 72, 13, -48, -11, 10, -30, -25, -28,
-31, -36, -37, -38, -40, -41, -42, -43, -44, -24,
-3, 12, 17, 15, 23, -8, -7, -35, 47, 48,
49, 50, 51, 52, 53, 54, 55, 56, 57, 58,
26, 42, 13, -44, -10, -11, 18, -13, 12, 2,
26, 42, 13, -44, -10, -12, 18, -13, 12, 2,
-18, 2, 26, 44, 27, 28, 30, 31, 32, 33,
34, 35, 36, 37, 38, 39, 41, 42, 66, 43,
14, -26, -33, 2, 62, 68, 15, -33, -30, -30,
@@ -1492,8 +1492,9 @@ yydefault:
yyDollar = yyS[yypt-2 : yypt+1]
//line promql/parser/generated_parser.y:570
{
yyVAL.labels = append(yyDollar[2].labels, labels.Label{Name: labels.MetricName, Value: yyDollar[1].item.Val})
sort.Sort(yyVAL.labels)
b := labels.NewBuilder(yyDollar[2].labels)
b.Set(labels.MetricName, yyDollar[1].item.Val)
yyVAL.labels = b.Labels(labels.EmptyLabels())
}
case 96:
yyDollar = yyS[yypt-1 : yypt+1]
@@ -1505,13 +1506,13 @@ yydefault:
yyDollar = yyS[yypt-3 : yypt+1]
//line promql/parser/generated_parser.y:579
{
yyVAL.labels = labels.New(yyDollar[2].labels...)
yyVAL.labels = labels.New(yyDollar[2].lblList...)
}
case 120:
yyDollar = yyS[yypt-4 : yypt+1]
//line promql/parser/generated_parser.y:581
{
yyVAL.labels = labels.New(yyDollar[2].labels...)
yyVAL.labels = labels.New(yyDollar[2].lblList...)
}
case 121:
yyDollar = yyS[yypt-2 : yypt+1]
@@ -1529,20 +1530,20 @@ yydefault:
yyDollar = yyS[yypt-3 : yypt+1]
//line promql/parser/generated_parser.y:589
{
yyVAL.labels = append(yyDollar[1].labels, yyDollar[3].label)
yyVAL.lblList = append(yyDollar[1].lblList, yyDollar[3].label)
}
case 124:
yyDollar = yyS[yypt-1 : yypt+1]
//line promql/parser/generated_parser.y:591
{
yyVAL.labels = []labels.Label{yyDollar[1].label}
yyVAL.lblList = []labels.Label{yyDollar[1].label}
}
case 125:
yyDollar = yyS[yypt-2 : yypt+1]
//line promql/parser/generated_parser.y:593
{
yylex.(*parser).unexpected("label set", "\",\" or \"}\"")
yyVAL.labels = yyDollar[1].labels
yyVAL.lblList = yyDollar[1].lblList
}
case 126:
yyDollar = yyS[yypt-3 : yypt+1]

View file

@@ -432,7 +432,7 @@ func (p *parser) expectType(node Node, want ValueType, context string) {
}
}
// checkAST checks the sanity of the provided AST. This includes type checking.
// checkAST checks the validity of the provided AST. This includes type checking.
func (p *parser) checkAST(node Node) (typ ValueType) {
// For expressions the type is determined by their Type function.
// Lists do not have a type but are not invalid either.

View file

@@ -14,10 +14,16 @@
package promql
import (
"context"
"path/filepath"
"strings"
"testing"
"time"
"github.com/stretchr/testify/require"
"golang.org/x/sync/errgroup"
"github.com/prometheus/prometheus/util/teststorage"
)
func TestEvaluations(t *testing.T) {
@@ -34,3 +40,60 @@ func TestEvaluations(t *testing.T) {
})
}
}
// Run a lot of queries at the same time, to check for race conditions.
func TestConcurrentRangeQueries(t *testing.T) {
stor := teststorage.New(t)
defer stor.Close()
opts := EngineOpts{
Logger: nil,
Reg: nil,
MaxSamples: 50000000,
Timeout: 100 * time.Second,
}
engine := NewEngine(opts)
const interval = 10000 // 10s interval.
// A day of data plus 10k steps.
numIntervals := 8640 + 10000
err := setupRangeQueryTestData(stor, engine, interval, numIntervals)
require.NoError(t, err)
cases := rangeQueryCases()
// Limit the number of queries running at the same time.
const numConcurrent = 4
sem := make(chan struct{}, numConcurrent)
for i := 0; i < numConcurrent; i++ {
sem <- struct{}{}
}
var g errgroup.Group
for _, c := range cases {
c := c
if strings.Contains(c.expr, "count_values") && c.steps > 10 {
continue // This test is too big to run with -race.
}
<-sem
g.Go(func() error {
defer func() {
sem <- struct{}{}
}()
qry, err := engine.NewRangeQuery(
stor, nil, c.expr,
time.Unix(int64((numIntervals-c.steps)*10), 0),
time.Unix(int64(numIntervals*10), 0), time.Second*10)
if err != nil {
return err
}
res := qry.Exec(context.Background())
if res.Err != nil {
return res.Err
}
qry.Close()
return nil
})
}
err = g.Wait()
require.NoError(t, err)
}
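
The test above bounds parallelism with a buffered channel used as a counting semaphore, combined with an errgroup that surfaces the first error. The same idiom in isolation, as a sketch with placeholder work standing in for query execution:

package main

import (
	"fmt"

	"golang.org/x/sync/errgroup"
)

func main() {
	const numConcurrent = 4
	sem := make(chan struct{}, numConcurrent)
	for i := 0; i < numConcurrent; i++ {
		sem <- struct{}{} // pre-fill the semaphore with tokens
	}
	var g errgroup.Group
	for i := 0; i < 20; i++ {
		i := i // capture the loop variable
		<-sem  // take a token; blocks while numConcurrent tasks run
		g.Go(func() error {
			defer func() { sem <- struct{}{} }() // return the token
			fmt.Println("work item", i)          // placeholder for the real work
			return nil
		})
	}
	if err := g.Wait(); err != nil {
		fmt.Println("error:", err)
	}
}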

View file

@@ -202,7 +202,7 @@ func (t *Test) parseEval(lines []string, i int) (int, *evalCmd, error) {
break
}
if f, err := parseNumber(defLine); err == nil {
cmd.expect(0, nil, parser.SequenceValue{Value: f})
cmd.expect(0, parser.SequenceValue{Value: f})
break
}
metric, vals, err := parser.ParseSeriesDesc(defLine)
@@ -218,7 +218,7 @@ func (t *Test) parseEval(lines []string, i int) (int, *evalCmd, error) {
if len(vals) > 1 {
return i, nil, raise(i, "expecting multiple values in instant evaluation not allowed")
}
cmd.expect(j, metric, vals...)
cmd.expectMetric(j, metric, vals...)
}
return i, cmd, nil
}
@@ -368,13 +368,15 @@ func (ev *evalCmd) String() string {
return "eval"
}
// expect adds a new metric with a sequence of values to the set of expected
// expect adds a sequence of values to the set of expected
// results for the query.
func (ev *evalCmd) expect(pos int, m labels.Labels, vals ...parser.SequenceValue) {
if m == nil {
ev.expected[0] = entry{pos: pos, vals: vals}
return
}
func (ev *evalCmd) expect(pos int, vals ...parser.SequenceValue) {
ev.expected[0] = entry{pos: pos, vals: vals}
}
// expectMetric adds a new metric with a sequence of values to the set of expected
// results for the query.
func (ev *evalCmd) expectMetric(pos int, m labels.Labels, vals ...parser.SequenceValue) {
h := m.Hash()
ev.metrics[h] = m
ev.expected[h] = entry{pos: pos, vals: vals}
@@ -491,8 +493,8 @@ func atModifierTestCases(exprStr string, evalTime time.Time) ([]atModifierTestCa
})
if containsNonStepInvariant {
// Since there is a step invariant function, we cannot automatically
// generate step invariant test cases for it sanely.
// Expression contains a function whose result can vary with evaluation
// time, even though its arguments are step invariant: skip it.
return nil, nil
}

View file

@@ -127,11 +127,11 @@ func TestLazyLoader_WithSamplesTill(t *testing.T) {
require.NoError(t, err)
for _, s := range tc.series {
var matchers []*labels.Matcher
for _, label := range s.Metric {
s.Metric.Range(func(label labels.Label) {
m, err := labels.NewMatcher(labels.MatchEqual, label.Name, label.Value)
require.NoError(t, err)
matchers = append(matchers, m)
}
})
// Get the series for the matcher.
ss := querier.Select(false, nil, matchers...)
@@ -143,7 +143,7 @@ func TestLazyLoader_WithSamplesTill(t *testing.T) {
got := Series{
Metric: storageSeries.Labels(),
}
it := storageSeries.Iterator()
it := storageSeries.Iterator(nil)
for it.Next() == chunkenc.ValFloat {
t, v := it.At()
got.Points = append(got.Points, Point{T: t, V: v})

View file

@@ -363,7 +363,11 @@ func (ss *StorageSeries) Labels() labels.Labels {
}
// Iterator returns a new iterator of the data of the series.
func (ss *StorageSeries) Iterator() chunkenc.Iterator {
func (ss *StorageSeries) Iterator(it chunkenc.Iterator) chunkenc.Iterator {
if ssi, ok := it.(*storageSeriesIterator); ok {
ssi.reset(ss.series)
return ssi
}
return newStorageSeriesIterator(ss.series)
}
@@ -379,6 +383,11 @@ func newStorageSeriesIterator(series Series) *storageSeriesIterator {
}
}
func (ssi *storageSeriesIterator) reset(series Series) {
ssi.points = series.Points
ssi.curr = -1
}
func (ssi *storageSeriesIterator) Seek(t int64) chunkenc.ValueType {
i := ssi.curr
if i < 0 {

View file

@@ -146,10 +146,7 @@ func NewAlertingRule(
labels, annotations, externalLabels labels.Labels, externalURL string,
restored bool, logger log.Logger,
) *AlertingRule {
el := make(map[string]string, len(externalLabels))
for _, lbl := range externalLabels {
el[lbl.Name] = lbl.Value
}
el := externalLabels.Map()
return &AlertingRule{
name: name,
@@ -217,16 +214,16 @@ func (r *AlertingRule) Annotations() labels.Labels {
func (r *AlertingRule) sample(alert *Alert, ts time.Time) promql.Sample {
lb := labels.NewBuilder(r.labels)
for _, l := range alert.Labels {
alert.Labels.Range(func(l labels.Label) {
lb.Set(l.Name, l.Value)
}
})
lb.Set(labels.MetricName, alertMetricName)
lb.Set(labels.AlertName, r.name)
lb.Set(alertStateLabel, alert.State.String())
s := promql.Sample{
Metric: lb.Labels(nil),
Metric: lb.Labels(labels.EmptyLabels()),
Point: promql.Point{T: timestamp.FromTime(ts), V: 1},
}
return s
@@ -236,15 +233,15 @@ func (r *AlertingRule) sample(alert *Alert, ts time.Time) promql.Sample {
func (r *AlertingRule) forStateSample(alert *Alert, ts time.Time, v float64) promql.Sample {
lb := labels.NewBuilder(r.labels)
for _, l := range alert.Labels {
alert.Labels.Range(func(l labels.Label) {
lb.Set(l.Name, l.Value)
}
})
lb.Set(labels.MetricName, alertForStateMetricName)
lb.Set(labels.AlertName, r.name)
s := promql.Sample{
Metric: lb.Labels(nil),
Metric: lb.Labels(labels.EmptyLabels()),
Point: promql.Point{T: timestamp.FromTime(ts), V: v},
}
return s
@@ -254,13 +251,13 @@ func (r *AlertingRule) forStateSample(alert *Alert, ts time.Time, v float64) pro
func (r *AlertingRule) QueryforStateSeries(alert *Alert, q storage.Querier) (storage.Series, error) {
smpl := r.forStateSample(alert, time.Now(), 0)
var matchers []*labels.Matcher
for _, l := range smpl.Metric {
smpl.Metric.Range(func(l labels.Label) {
mt, err := labels.NewMatcher(labels.MatchEqual, l.Name, l.Value)
if err != nil {
panic(err)
}
matchers = append(matchers, mt)
}
})
sset := q.Select(false, nil, matchers...)
var s storage.Series
@@ -268,7 +265,7 @@ func (r *AlertingRule) QueryforStateSeries(alert *Alert, q storage.Querier) (sto
// Query assures that smpl.Metric is included in sset.At().Labels(),
// hence just checking the length would act like equality.
// (This is faster than calling labels.Compare again as we already have some info).
if len(sset.At().Labels()) == len(matchers) {
if sset.At().Labels().Len() == len(matchers) {
s = sset.At()
break
}
@@ -327,10 +324,7 @@ func (r *AlertingRule) Eval(ctx context.Context, evalDelay time.Duration, ts tim
alerts := make(map[uint64]*Alert, len(res))
for _, smpl := range res {
// Provide the alert information to the template.
l := make(map[string]string, len(smpl.Metric))
for _, lbl := range smpl.Metric {
l[lbl.Name] = lbl.Value
}
l := smpl.Metric.Map()
tmplData := template.AlertTemplateData(l, r.externalLabels, r.externalURL, smpl.V)
// Inject some convenience variables that are easier to remember for users
@@ -363,17 +357,18 @@ func (r *AlertingRule) Eval(ctx context.Context, evalDelay time.Duration, ts tim
lb := labels.NewBuilder(smpl.Metric).Del(labels.MetricName)
for _, l := range r.labels {
r.labels.Range(func(l labels.Label) {
lb.Set(l.Name, expand(l.Value))
}
})
lb.Set(labels.AlertName, r.Name())
annotations := make(labels.Labels, 0, len(r.annotations))
for _, a := range r.annotations {
annotations = append(annotations, labels.Label{Name: a.Name, Value: expand(a.Value)})
}
sb := labels.ScratchBuilder{}
r.annotations.Range(func(a labels.Label) {
sb.Add(a.Name, expand(a.Value))
})
annotations := sb.Labels()
lbs := lb.Labels(nil)
lbs := lb.Labels(labels.EmptyLabels())
h := lbs.Hash()
resultFPs[h] = struct{}{}

View file

@@ -661,6 +661,7 @@ func (g *Group) Eval(ctx context.Context, ts time.Time) {
}
var (
numOutOfOrder = 0
numTooOld = 0
numDuplicates = 0
)
@@ -685,10 +686,16 @@ func (g *Group) Eval(ctx context.Context, ts time.Time) {
rule.SetLastError(err)
sp.SetStatus(codes.Error, err.Error())
unwrappedErr := errors.Unwrap(err)
if unwrappedErr == nil {
unwrappedErr = err
}
switch {
case errors.Is(unwrappedErr, storage.ErrOutOfOrderSample):
numOutOfOrder++
level.Debug(g.logger).Log("name", rule.Name(), "index", i, "msg", "Rule evaluation result discarded", "err", err, "sample", s)
case errors.Is(unwrappedErr, storage.ErrTooOldSample):
numTooOld++
level.Debug(g.logger).Log("name", rule.Name(), "index", i, "msg", "Rule evaluation result discarded", "err", err, "sample", s)
case errors.Is(unwrappedErr, storage.ErrDuplicateSampleForTimestamp):
numDuplicates++
level.Debug(g.logger).Log("name", rule.Name(), "index", i, "msg", "Rule evaluation result discarded", "err", err, "sample", s)
@@ -703,6 +710,9 @@ func (g *Group) Eval(ctx context.Context, ts time.Time) {
if numOutOfOrder > 0 {
level.Warn(g.logger).Log("name", rule.Name(), "index", i, "msg", "Error on ingesting out-of-order result from rule evaluation", "numDropped", numOutOfOrder)
}
if numTooOld > 0 {
level.Warn(g.logger).Log("name", rule.Name(), "index", i, "msg", "Error on ingesting too old result from rule evaluation", "numDropped", numTooOld)
}
if numDuplicates > 0 {
level.Warn(g.logger).Log("name", rule.Name(), "index", i, "msg", "Error on ingesting results from rule evaluation with different value but same timestamp", "numDropped", numDuplicates)
}
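
The unwrap-or-fall-back prologue above now appears three times in this file. A hypothetical helper, not part of this commit, capturing that step:

// unwrapOrSelf is a sketch only. errors.Unwrap returns nil when err wraps
// nothing, in which case the sentinel comparison must use err itself.
func unwrapOrSelf(err error) error {
	if unwrapped := errors.Unwrap(err); unwrapped != nil {
		return unwrapped
	}
	return err
}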
@@ -712,9 +722,14 @@ func (g *Group) Eval(ctx context.Context, ts time.Time) {
// Series no longer exposed, mark it stale.
_, err = app.Append(0, lset, timestamp.FromTime(ts.Add(-evaluationDelay)), math.Float64frombits(value.StaleNaN))
unwrappedErr := errors.Unwrap(err)
if unwrappedErr == nil {
unwrappedErr = err
}
switch {
case unwrappedErr == nil:
case errors.Is(unwrappedErr, storage.ErrOutOfOrderSample), errors.Is(unwrappedErr, storage.ErrDuplicateSampleForTimestamp):
case errors.Is(unwrappedErr, storage.ErrOutOfOrderSample),
errors.Is(unwrappedErr, storage.ErrTooOldSample),
errors.Is(unwrappedErr, storage.ErrDuplicateSampleForTimestamp):
// Do not count these in logging, as this is expected if series
// is exposed from a different rule.
default:
@@ -750,9 +765,14 @@ func (g *Group) cleanupStaleSeries(ctx context.Context, ts time.Time) {
// Rule that produced series no longer configured, mark it stale.
_, err := app.Append(0, s, timestamp.FromTime(ts.Add(-evaluationDelay)), math.Float64frombits(value.StaleNaN))
unwrappedErr := errors.Unwrap(err)
if unwrappedErr == nil {
unwrappedErr = err
}
switch {
case unwrappedErr == nil:
case errors.Is(unwrappedErr, storage.ErrOutOfOrderSample), errors.Is(unwrappedErr, storage.ErrDuplicateSampleForTimestamp):
case errors.Is(unwrappedErr, storage.ErrOutOfOrderSample),
errors.Is(unwrappedErr, storage.ErrTooOldSample),
errors.Is(unwrappedErr, storage.ErrDuplicateSampleForTimestamp):
// Do not count these in logging, as this is expected if series
// is exposed from a different rule.
default:
@@ -821,7 +841,7 @@ func (g *Group) RestoreForState(ts time.Time) {
// Series found for the 'for' state.
var t int64
var v float64
it := s.Iterator()
it := s.Iterator(nil)
for it.Next() == chunkenc.ValFloat {
t, v = it.At()
}

View file

@@ -607,12 +607,13 @@ func TestStaleness(t *testing.T) {
// Convert a SeriesSet into a form usable with require.Equal.
func readSeriesSet(ss storage.SeriesSet) (map[string][]promql.Point, error) {
result := map[string][]promql.Point{}
var it chunkenc.Iterator
for ss.Next() {
series := ss.At()
points := []promql.Point{}
it := series.Iterator()
it := series.Iterator(it)
for it.Next() == chunkenc.ValFloat {
t, v := it.At()
points = append(points, promql.Point{T: t, V: v})

View file

@@ -85,11 +85,11 @@ func (rule *RecordingRule) Eval(ctx context.Context, evalDelay time.Duration, ts
lb.Set(labels.MetricName, rule.name)
for _, l := range rule.labels {
rule.labels.Range(func(l labels.Label) {
lb.Set(l.Name, l.Value)
}
})
sample.Metric = lb.Labels(nil)
sample.Metric = lb.Labels(labels.EmptyLabels())
}
// Check that the rule does not produce identical metrics after applying

View file

@@ -42,7 +42,7 @@ func (a nopAppender) AppendExemplar(storage.SeriesRef, labels.Labels, exemplar.E
return 0, nil
}
func (a nopAppender) AppendHistogram(storage.SeriesRef, labels.Labels, int64, *histogram.Histogram) (storage.SeriesRef, error) {
func (a nopAppender) AppendHistogram(storage.SeriesRef, labels.Labels, int64, *histogram.Histogram, *histogram.FloatHistogram) (storage.SeriesRef, error) {
return 0, nil
}
@@ -60,8 +60,9 @@ type sample struct {
}
type histogramSample struct {
t int64
h *histogram.Histogram
t int64
h *histogram.Histogram
fh *histogram.FloatHistogram
}
// collectResultAppender records all samples that were added through the appender.
@@ -110,13 +111,13 @@ func (a *collectResultAppender) AppendExemplar(ref storage.SeriesRef, l labels.L
return a.next.AppendExemplar(ref, l, e)
}
func (a *collectResultAppender) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t int64, h *histogram.Histogram) (storage.SeriesRef, error) {
a.pendingHistograms = append(a.pendingHistograms, histogramSample{h: h, t: t})
func (a *collectResultAppender) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) {
a.pendingHistograms = append(a.pendingHistograms, histogramSample{h: h, fh: fh, t: t})
if a.next == nil {
return 0, nil
}
return a.next.AppendHistogram(ref, l, t, h)
return a.next.AppendHistogram(ref, l, t, h, fh)
}
func (a *collectResultAppender) UpdateMetadata(ref storage.SeriesRef, l labels.Labels, m metadata.Metadata) (storage.SeriesRef, error) {

View file

@@ -313,6 +313,18 @@ func (m *Manager) TargetsAll() map[string][]*Target {
return targets
}
// ScrapePools returns the list of all scrape pool names.
func (m *Manager) ScrapePools() []string {
m.mtxScrape.Lock()
defer m.mtxScrape.Unlock()
names := make([]string, 0, len(m.scrapePools))
for name := range m.scrapePools {
names = append(names, name)
}
return names
}
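
A hypothetical caller of the new accessor; the names come from map iteration, so their order is unspecified and should be sorted before display or comparison:

// Sketch only; assumes an initialized *Manager and imports of fmt and sort.
func listPools(m *Manager) {
	pools := m.ScrapePools()
	sort.Strings(pools) // map iteration order is not deterministic
	for _, name := range pools {
		fmt.Println("scrape pool:", name)
	}
}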
// TargetsActive returns the active targets currently being scraped.
func (m *Manager) TargetsActive() map[string][]*Target {
m.mtxScrape.Lock()

View file

@@ -14,6 +14,7 @@
package scrape
import (
"context"
"net/http"
"strconv"
"testing"
@@ -24,6 +25,7 @@ import (
"gopkg.in/yaml.v2"
"github.com/prometheus/prometheus/config"
"github.com/prometheus/prometheus/discovery"
"github.com/prometheus/prometheus/discovery/targetgroup"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/relabel"
@@ -149,8 +151,8 @@ func TestPopulateLabels(t *testing.T) {
ScrapeInterval: model.Duration(time.Second),
ScrapeTimeout: model.Duration(time.Second),
},
res: nil,
resOrig: nil,
res: labels.EmptyLabels(),
resOrig: labels.EmptyLabels(),
err: "no address",
},
// Address label missing, but added in relabelling.
@@ -242,8 +244,8 @@ func TestPopulateLabels(t *testing.T) {
ScrapeInterval: model.Duration(time.Second),
ScrapeTimeout: model.Duration(time.Second),
},
res: nil,
resOrig: nil,
res: labels.EmptyLabels(),
resOrig: labels.EmptyLabels(),
err: "invalid label value for \"custom\": \"\\xbd\"",
},
// Invalid duration in interval label.
@@ -259,8 +261,8 @@ func TestPopulateLabels(t *testing.T) {
ScrapeInterval: model.Duration(time.Second),
ScrapeTimeout: model.Duration(time.Second),
},
res: nil,
resOrig: nil,
res: labels.EmptyLabels(),
resOrig: labels.EmptyLabels(),
err: "error parsing scrape interval: not a valid duration string: \"2notseconds\"",
},
// Invalid duration in timeout label.
@@ -276,8 +278,8 @@ func TestPopulateLabels(t *testing.T) {
ScrapeInterval: model.Duration(time.Second),
ScrapeTimeout: model.Duration(time.Second),
},
res: nil,
resOrig: nil,
res: labels.EmptyLabels(),
resOrig: labels.EmptyLabels(),
err: "error parsing scrape timeout: not a valid duration string: \"2notseconds\"",
},
// 0 interval in timeout label.
@@ -293,8 +295,8 @@ func TestPopulateLabels(t *testing.T) {
ScrapeInterval: model.Duration(time.Second),
ScrapeTimeout: model.Duration(time.Second),
},
res: nil,
resOrig: nil,
res: labels.EmptyLabels(),
resOrig: labels.EmptyLabels(),
err: "scrape interval cannot be 0",
},
// 0 duration in timeout label.
@@ -310,8 +312,8 @@ func TestPopulateLabels(t *testing.T) {
ScrapeInterval: model.Duration(time.Second),
ScrapeTimeout: model.Duration(time.Second),
},
res: nil,
resOrig: nil,
res: labels.EmptyLabels(),
resOrig: labels.EmptyLabels(),
err: "scrape timeout cannot be 0",
},
// Timeout less than interval.
@@ -328,8 +330,8 @@ func TestPopulateLabels(t *testing.T) {
ScrapeInterval: model.Duration(time.Second),
ScrapeTimeout: model.Duration(time.Second),
},
res: nil,
resOrig: nil,
res: labels.EmptyLabels(),
resOrig: labels.EmptyLabels(),
err: "scrape timeout cannot be greater than scrape interval (\"2s\" > \"1s\")",
},
// Don't attach default port.
@@ -635,3 +637,69 @@ global:
t.Error("Jitter should not be the same on different set of external labels")
}
}
func TestManagerScrapePools(t *testing.T) {
cfgText1 := `
scrape_configs:
- job_name: job1
static_configs:
- targets: ["foo:9090"]
- job_name: job2
static_configs:
- targets: ["foo:9091", "foo:9092"]
`
cfgText2 := `
scrape_configs:
- job_name: job1
static_configs:
- targets: ["foo:9090", "foo:9094"]
- job_name: job3
static_configs:
- targets: ["foo:9093"]
`
var (
cfg1 = loadConfiguration(t, cfgText1)
cfg2 = loadConfiguration(t, cfgText2)
)
reload := func(scrapeManager *Manager, cfg *config.Config) {
newLoop := func(scrapeLoopOptions) loop {
return noopLoop()
}
scrapeManager.scrapePools = map[string]*scrapePool{}
for _, sc := range cfg.ScrapeConfigs {
_, cancel := context.WithCancel(context.Background())
defer cancel()
sp := &scrapePool{
appendable: &nopAppendable{},
activeTargets: map[uint64]*Target{},
loops: map[uint64]loop{
1: noopLoop(),
},
newLoop: newLoop,
logger: nil,
config: sc,
client: http.DefaultClient,
cancel: cancel,
}
for _, c := range sc.ServiceDiscoveryConfigs {
staticConfig := c.(discovery.StaticConfig)
for _, group := range staticConfig {
for i := range group.Targets {
sp.activeTargets[uint64(i)] = &Target{}
}
}
}
scrapeManager.scrapePools[sc.JobName] = sp
}
}
opts := Options{}
scrapeManager := NewManager(&opts, nil, nil)
reload(scrapeManager, cfg1)
require.ElementsMatch(t, []string{"job1", "job2"}, scrapeManager.ScrapePools())
reload(scrapeManager, cfg2)
require.ElementsMatch(t, []string{"job1", "job3"}, scrapeManager.ScrapePools())
}

View file

@@ -268,6 +268,7 @@ type scrapeLoopOptions struct {
const maxAheadTime = 10 * time.Minute
// returning an empty label set is interpreted as "drop"
type labelsMutator func(labels.Labels) labels.Labels
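
A minimal mutator honouring that contract, the same shape the tests in this commit use: returning an empty set drops the sample.

var dropDeleteMe labelsMutator = func(l labels.Labels) labels.Labels {
	if l.Has("deleteme") {
		return labels.EmptyLabels() // interpreted as "drop this sample"
	}
	return l
}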
func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, jitterSeed uint64, logger log.Logger, options *Options) (*scrapePool, error) {
@@ -498,9 +499,9 @@ func (sp *scrapePool) Sync(tgs []*targetgroup.Group) {
}
targetSyncFailed.WithLabelValues(sp.config.JobName).Add(float64(len(failures)))
for _, t := range targets {
if t.Labels().Len() > 0 {
if !t.Labels().IsEmpty() {
all = append(all, t)
} else if t.DiscoveredLabels().Len() > 0 {
} else if !t.DiscoveredLabels().IsEmpty() {
sp.droppedTargets = append(sp.droppedTargets, t)
}
}
@@ -634,7 +635,7 @@ func verifyLabelLimits(lset labels.Labels, limits *labelLimits) error {
met := lset.Get(labels.MetricName)
if limits.labelLimit > 0 {
nbLabels := len(lset)
nbLabels := lset.Len()
if nbLabels > int(limits.labelLimit) {
return fmt.Errorf("label_limit exceeded (metric: %.50s, number of labels: %d, limit: %d)", met, nbLabels, limits.labelLimit)
}
@@ -644,7 +645,7 @@ func verifyLabelLimits(lset labels.Labels, limits *labelLimits) error {
return nil
}
for _, l := range lset {
return lset.Validate(func(l labels.Label) error {
if limits.labelNameLengthLimit > 0 {
nameLength := len(l.Name)
if nameLength > int(limits.labelNameLengthLimit) {
@ -658,8 +659,8 @@ func verifyLabelLimits(lset labels.Labels, limits *labelLimits) error {
return fmt.Errorf("label_value_length_limit exceeded (metric: %.50s, label name: %.50s, value: %.50q, length: %d, limit: %d)", met, l.Name, l.Value, valueLength, limits.labelValueLengthLimit)
}
}
}
return nil
return nil
})
}
func mutateSampleLabels(lset labels.Labels, target *Target, honor bool, rc []*relabel.Config) labels.Labels {
@@ -667,37 +668,37 @@ func mutateSampleLabels(lset labels.Labels, target *Target, honor bool, rc []*re
targetLabels := target.Labels()
if honor {
for _, l := range targetLabels {
targetLabels.Range(func(l labels.Label) {
if !lset.Has(l.Name) {
lb.Set(l.Name, l.Value)
}
}
})
} else {
var conflictingExposedLabels labels.Labels
for _, l := range targetLabels {
var conflictingExposedLabels []labels.Label
targetLabels.Range(func(l labels.Label) {
existingValue := lset.Get(l.Name)
if existingValue != "" {
conflictingExposedLabels = append(conflictingExposedLabels, labels.Label{Name: l.Name, Value: existingValue})
}
// It is now safe to set the target label.
lb.Set(l.Name, l.Value)
}
})
if len(conflictingExposedLabels) > 0 {
resolveConflictingExposedLabels(lb, lset, targetLabels, conflictingExposedLabels)
}
}
res := lb.Labels(nil)
res := lb.Labels(labels.EmptyLabels())
if len(rc) > 0 {
res = relabel.Process(res, rc...)
res, _ = relabel.Process(res, rc...)
}
return res
}
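
relabel.Process now signals a drop through a second return value rather than a nil label set; PopulateLabels below follows the same shape. A sketch of the new call contract, with illustrative identifiers:

func applyRelabel(lset labels.Labels, rc []*relabel.Config) (labels.Labels, bool) {
	res, keep := relabel.Process(lset, rc...)
	if !keep {
		return labels.EmptyLabels(), false // dropped by relabelling
	}
	return res, true
}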
func resolveConflictingExposedLabels(lb *labels.Builder, exposedLabels, targetLabels, conflictingExposedLabels labels.Labels) {
func resolveConflictingExposedLabels(lb *labels.Builder, exposedLabels, targetLabels labels.Labels, conflictingExposedLabels []labels.Label) {
sort.SliceStable(conflictingExposedLabels, func(i, j int) bool {
return len(conflictingExposedLabels[i].Name) < len(conflictingExposedLabels[j].Name)
})
@@ -708,7 +709,7 @@ func resolveConflictingExposedLabels(lb *labels.Builder, exposedLabels, targetLa
newName = model.ExportedLabelPrefix + newName
if !exposedLabels.Has(newName) &&
!targetLabels.Has(newName) &&
!conflictingExposedLabels[:i].Has(newName) {
!labelSliceHas(conflictingExposedLabels[:i], newName) {
conflictingExposedLabels[i].Name = newName
break
}
@@ -720,15 +721,24 @@ func resolveConflictingExposedLabels(lb *labels.Builder, exposedLabels, targetLa
}
}
func labelSliceHas(lbls []labels.Label, name string) bool {
for _, l := range lbls {
if l.Name == name {
return true
}
}
return false
}
func mutateReportSampleLabels(lset labels.Labels, target *Target) labels.Labels {
lb := labels.NewBuilder(lset)
for _, l := range target.Labels() {
target.Labels().Range(func(l labels.Label) {
lb.Set(model.ExportedLabelPrefix+l.Name, lset.Get(l.Name))
lb.Set(l.Name, l.Value)
}
})
return lb.Labels(nil)
return lb.Labels(labels.EmptyLabels())
}
// appender returns an appender for ingested samples from the target.
@@ -1531,6 +1541,7 @@ loop:
parsedTimestamp *int64
val float64
h *histogram.Histogram
fh *histogram.FloatHistogram
)
if et, err = p.Next(); err != nil {
if err == io.EOF {
@@ -1558,8 +1569,7 @@ loop:
t := defTime
if isHistogram {
met, parsedTimestamp, h, _ = p.Histogram()
// TODO: ingest float histograms in tsdb.
met, parsedTimestamp, h, fh = p.Histogram()
} else {
met, parsedTimestamp, val = p.Series()
}
@@ -1599,8 +1609,8 @@ loop:
// and relabeling and store the final label set.
lset = sl.sampleMutator(lset)
// The label set may be set to nil to indicate dropping.
if lset == nil {
// The label set may be set to empty to indicate dropping.
if lset.IsEmpty() {
sl.cache.addDropped(mets)
continue
}
@@ -1609,6 +1619,10 @@ loop:
err = errNameLabelMandatory
break loop
}
if !lset.IsValid() {
err = fmt.Errorf("invalid metric name or label names: %s", lset.String())
break loop
}
// If any label limits is exceeded the scrape should fail.
if err = verifyLabelLimits(lset, sl.labelLimits); err != nil {
@@ -1622,7 +1636,9 @@ loop:
if isHistogram {
if h != nil {
ref, err = app.AppendHistogram(ref, lset, t, h)
ref, err = app.AppendHistogram(ref, lset, t, h, nil)
} else {
ref, err = app.AppendHistogram(ref, lset, t, nil, fh)
}
} else {
ref, err = app.Append(ref, lset, t, val)
@@ -1853,12 +1869,10 @@ func (sl *scrapeLoop) addReportSample(app storage.Appender, s string, t int64, v
ref = ce.ref
lset = ce.lset
} else {
lset = labels.Labels{
// The constants are suffixed with the invalid \xff unicode rune to avoid collisions
// with scraped metrics in the cache.
// We have to drop it when building the actual metric.
labels.Label{Name: labels.MetricName, Value: s[:len(s)-1]},
}
// The constants are suffixed with the invalid \xff unicode rune to avoid collisions
// with scraped metrics in the cache.
// We have to drop it when building the actual metric.
lset = labels.FromStrings(labels.MetricName, s[:len(s)-1])
lset = sl.reportSampleMutator(lset)
}

View file

@@ -1020,6 +1020,51 @@ func TestScrapeLoopSeriesAdded(t *testing.T) {
require.Equal(t, 0, seriesAdded)
}
func TestScrapeLoopFailWithInvalidLabelsAfterRelabel(t *testing.T) {
s := teststorage.New(t)
defer s.Close()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
target := &Target{
labels: labels.FromStrings("pod_label_invalid_012", "test"),
}
relabelConfig := []*relabel.Config{{
Action: relabel.LabelMap,
Regex: relabel.MustNewRegexp("pod_label_invalid_(.+)"),
Separator: ";",
Replacement: "$1",
}}
sl := newScrapeLoop(ctx,
&testScraper{},
nil, nil,
func(l labels.Labels) labels.Labels {
return mutateSampleLabels(l, target, true, relabelConfig)
},
nopMutator,
s.Appender,
nil,
0,
true,
0,
nil,
0,
0,
false,
false,
nil,
false,
)
slApp := sl.appender(ctx)
total, added, seriesAdded, err := sl.append(slApp, []byte("test_metric 1\n"), "", time.Time{})
require.ErrorContains(t, err, "invalid metric name or label names")
require.NoError(t, slApp.Rollback())
require.Equal(t, 1, total)
require.Equal(t, 0, added)
require.Equal(t, 0, seriesAdded)
}
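
Why this fails: the labelmap rule copies pod_label_invalid_012 into a new label whose name is the capture $1, i.e. the literal 012. Label names must match [a-zA-Z_][a-zA-Z0-9_]*, so a name starting with a digit makes lset.IsValid() return false and the append aborts with the error asserted above.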
func makeTestMetrics(n int) []byte {
// Construct a metrics string to parse
sb := bytes.Buffer{}
@@ -1578,7 +1623,7 @@ func TestScrapeLoopAppendSampleLimit(t *testing.T) {
nil, nil, nil,
func(l labels.Labels) labels.Labels {
if l.Has("deleteme") {
return nil
return labels.EmptyLabels()
}
return l
},
@@ -2914,7 +2959,7 @@ func TestScrapeReportSingleAppender(t *testing.T) {
c := 0
for series.Next() {
i := series.At().Iterator()
i := series.At().Iterator(nil)
for i.Next() != chunkenc.ValNone {
c++
}
@@ -2987,7 +3032,7 @@ func TestScrapeReportLimit(t *testing.T) {
var found bool
for series.Next() {
i := series.At().Iterator()
i := series.At().Iterator(nil)
for i.Next() == chunkenc.ValFloat {
_, v := i.At()
require.Equal(t, 1.0, v)

View file

@@ -172,22 +172,20 @@ func (t *Target) offset(interval time.Duration, jitterSeed uint64) time.Duration
// Labels returns a copy of the set of all public labels of the target.
func (t *Target) Labels() labels.Labels {
lset := make(labels.Labels, 0, len(t.labels))
for _, l := range t.labels {
b := labels.NewScratchBuilder(t.labels.Len())
t.labels.Range(func(l labels.Label) {
if !strings.HasPrefix(l.Name, model.ReservedLabelPrefix) {
lset = append(lset, l)
b.Add(l.Name, l.Value)
}
}
return lset
})
return b.Labels()
}
// DiscoveredLabels returns a copy of the target's labels before any processing.
func (t *Target) DiscoveredLabels() labels.Labels {
t.mtx.Lock()
defer t.mtx.Unlock()
lset := make(labels.Labels, len(t.discoveredLabels))
copy(lset, t.discoveredLabels)
return lset
return t.discoveredLabels.Copy()
}
// SetDiscoveredLabels sets new DiscoveredLabels
@@ -205,9 +203,9 @@ func (t *Target) URL() *url.URL {
params[k] = make([]string, len(v))
copy(params[k], v)
}
for _, l := range t.labels {
t.labels.Range(func(l labels.Label) {
if !strings.HasPrefix(l.Name, model.ParamLabelPrefix) {
continue
return
}
ks := l.Name[len(model.ParamLabelPrefix):]
@ -216,7 +214,7 @@ func (t *Target) URL() *url.URL {
} else {
params[ks] = []string{l.Value}
}
}
})
return &url.URL{
Scheme: t.labels.Get(model.SchemeLabel),
@@ -374,15 +372,15 @@ func PopulateLabels(lset labels.Labels, cfg *config.ScrapeConfig, noDefaultPort
}
}
preRelabelLabels := lb.Labels(nil)
lset = relabel.Process(preRelabelLabels, cfg.RelabelConfigs...)
preRelabelLabels := lb.Labels(labels.EmptyLabels())
lset, keep := relabel.Process(preRelabelLabels, cfg.RelabelConfigs...)
// Check if the target was dropped.
if lset == nil {
return nil, preRelabelLabels, nil
if !keep {
return labels.EmptyLabels(), preRelabelLabels, nil
}
if v := lset.Get(model.AddressLabel); v == "" {
return nil, nil, errors.New("no address")
return labels.EmptyLabels(), labels.EmptyLabels(), errors.New("no address")
}
lb = labels.NewBuilder(lset)
@@ -413,7 +411,7 @@ func PopulateLabels(lset labels.Labels, cfg *config.ScrapeConfig, noDefaultPort
case "https":
addr = addr + ":443"
default:
return nil, nil, errors.Errorf("invalid scheme: %q", cfg.Scheme)
return labels.EmptyLabels(), labels.EmptyLabels(), errors.Errorf("invalid scheme: %q", cfg.Scheme)
}
lb.Set(model.AddressLabel, addr)
}
@@ -434,50 +432,54 @@ func PopulateLabels(lset labels.Labels, cfg *config.ScrapeConfig, noDefaultPort
}
if err := config.CheckTargetAddress(model.LabelValue(addr)); err != nil {
return nil, nil, err
return labels.EmptyLabels(), labels.EmptyLabels(), err
}
interval := lset.Get(model.ScrapeIntervalLabel)
intervalDuration, err := model.ParseDuration(interval)
if err != nil {
return nil, nil, errors.Errorf("error parsing scrape interval: %v", err)
return labels.EmptyLabels(), labels.EmptyLabels(), errors.Errorf("error parsing scrape interval: %v", err)
}
if time.Duration(intervalDuration) == 0 {
return nil, nil, errors.New("scrape interval cannot be 0")
return labels.EmptyLabels(), labels.EmptyLabels(), errors.New("scrape interval cannot be 0")
}
timeout := lset.Get(model.ScrapeTimeoutLabel)
timeoutDuration, err := model.ParseDuration(timeout)
if err != nil {
return nil, nil, errors.Errorf("error parsing scrape timeout: %v", err)
return labels.EmptyLabels(), labels.EmptyLabels(), errors.Errorf("error parsing scrape timeout: %v", err)
}
if time.Duration(timeoutDuration) == 0 {
return nil, nil, errors.New("scrape timeout cannot be 0")
return labels.EmptyLabels(), labels.EmptyLabels(), errors.New("scrape timeout cannot be 0")
}
if timeoutDuration > intervalDuration {
return nil, nil, errors.Errorf("scrape timeout cannot be greater than scrape interval (%q > %q)", timeout, interval)
return labels.EmptyLabels(), labels.EmptyLabels(), errors.Errorf("scrape timeout cannot be greater than scrape interval (%q > %q)", timeout, interval)
}
// Meta labels are deleted after relabelling. Other internal labels propagate to
// the target which decides whether they will be part of their label set.
for _, l := range lset {
lset.Range(func(l labels.Label) {
if strings.HasPrefix(l.Name, model.MetaLabelPrefix) {
lb.Del(l.Name)
}
}
})
// Default the instance label to the target address.
if v := lset.Get(model.InstanceLabel); v == "" {
lb.Set(model.InstanceLabel, addr)
}
res = lb.Labels(nil)
for _, l := range res {
res = lb.Labels(labels.EmptyLabels())
err = res.Validate(func(l labels.Label) error {
// Check label values are valid, drop the target if not.
if !model.LabelValue(l.Value).IsValid() {
return nil, nil, errors.Errorf("invalid label value for %q: %q", l.Name, l.Value)
return errors.Errorf("invalid label value for %q: %q", l.Name, l.Value)
}
return nil
})
if err != nil {
return labels.EmptyLabels(), labels.EmptyLabels(), err
}
return res, preRelabelLabels, nil
}
@@ -501,12 +503,12 @@ func TargetsFromGroup(tg *targetgroup.Group, cfg *config.ScrapeConfig, noDefault
lset := labels.New(lbls...)
lbls, origLabels, err := PopulateLabels(lset, cfg, noDefaultPort)
lset, origLabels, err := PopulateLabels(lset, cfg, noDefaultPort)
if err != nil {
failures = append(failures, errors.Wrapf(err, "instance %d in group %s", i, tg))
}
if lbls != nil || origLabels != nil {
targets = append(targets, NewTarget(lbls, origLabels, cfg.Params))
if !lset.IsEmpty() || !origLabels.IsEmpty() {
targets = append(targets, NewTarget(lset, origLabels, cfg.Params))
}
}
return targets, failures

View file

@@ -129,7 +129,7 @@ func newTestTarget(targetURL string, deadline time.Duration, lbls labels.Labels)
lb.Set(model.AddressLabel, strings.TrimPrefix(targetURL, "http://"))
lb.Set(model.MetricsPathLabel, "/metrics")
return &Target{labels: lb.Labels(nil)}
return &Target{labels: lb.Labels(labels.EmptyLabels())}
}
func TestNewHTTPBearerToken(t *testing.T) {

View file

@@ -174,14 +174,14 @@ func (f *fanoutAppender) AppendExemplar(ref SeriesRef, l labels.Labels, e exempl
return ref, nil
}
func (f *fanoutAppender) AppendHistogram(ref SeriesRef, l labels.Labels, t int64, h *histogram.Histogram) (SeriesRef, error) {
ref, err := f.primary.AppendHistogram(ref, l, t, h)
func (f *fanoutAppender) AppendHistogram(ref SeriesRef, l labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (SeriesRef, error) {
ref, err := f.primary.AppendHistogram(ref, l, t, h, fh)
if err != nil {
return ref, err
}
for _, appender := range f.secondaries {
if _, err := appender.AppendHistogram(ref, l, t, h); err != nil {
if _, err := appender.AppendHistogram(ref, l, t, h, fh); err != nil {
return 0, err
}
}

View file

@@ -86,11 +86,12 @@ func TestFanout_SelectSorted(t *testing.T) {
result := make(map[int64]float64)
var labelsResult labels.Labels
var iterator chunkenc.Iterator
for seriesSet.Next() {
series := seriesSet.At()
seriesLabels := series.Labels()
labelsResult = seriesLabels
iterator := series.Iterator()
iterator := series.Iterator(iterator)
for iterator.Next() == chunkenc.ValFloat {
timestamp, value := iterator.At()
result[timestamp] = value
@@ -112,11 +113,12 @@ func TestFanout_SelectSorted(t *testing.T) {
result := make(map[int64]float64)
var labelsResult labels.Labels
var iterator chunkenc.Iterator
for seriesSet.Next() {
series := seriesSet.At()
seriesLabels := series.Labels()
labelsResult = seriesLabels
iterator := series.Iterator()
iterator := series.Iterator(iterator)
for iterator.Next() == chunkenc.ValFloat {
timestamp, value := iterator.At()
result[timestamp] = value

View file

@@ -285,7 +285,7 @@ type HistogramAppender interface {
// For efficiency reasons, the histogram is passed as a
// pointer. AppendHistogram won't mutate the histogram, but in turn
// depends on the caller to not mutate it either.
AppendHistogram(ref SeriesRef, l labels.Labels, t int64, h *histogram.Histogram) (SeriesRef, error)
AppendHistogram(ref SeriesRef, l labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (SeriesRef, error)
}
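
Every call site in this commit passes exactly one non-nil histogram argument, as the engine tests above show. A sketch under that assumption:

// appendEither is illustrative only; callers here guarantee that exactly
// one of h and fh is non-nil when appending a native histogram sample.
func appendEither(app HistogramAppender, lset labels.Labels, ts int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (SeriesRef, error) {
	if fh != nil {
		return app.AppendHistogram(0, lset, ts, nil, fh)
	}
	return app.AppendHistogram(0, lset, ts, h, nil)
}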
// MetadataUpdater provides an interface for associating metadata to stored series.
@@ -385,7 +385,7 @@ func (s mockSeries) Labels() labels.Labels {
return labels.FromStrings(s.labelSet...)
}
func (s mockSeries) Iterator() chunkenc.Iterator {
func (s mockSeries) Iterator(chunkenc.Iterator) chunkenc.Iterator {
return chunkenc.MockSeriesIterator(s.timestamps, s.values)
}
@@ -424,14 +424,17 @@ type Labels interface {
}
type SampleIterable interface {
// Iterator returns a new, independent iterator of the data of the series.
Iterator() chunkenc.Iterator
// Iterator returns an iterator of the data of the series.
// The iterator passed as argument is for re-use, if not nil.
// Depending on implementation, the iterator can
// be re-used or a new iterator can be allocated.
Iterator(chunkenc.Iterator) chunkenc.Iterator
}
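
On the caller side the new signature enables the loop-local re-use pattern used throughout this commit, for example in readSeriesSet in the rules tests: keep one iterator variable outside the loop and feed it back in.

// Sketch: ss is any SeriesSet.
var it chunkenc.Iterator
for ss.Next() {
	it = ss.At().Iterator(it) // re-uses the previous iterator's memory when possible
	for it.Next() == chunkenc.ValFloat {
		t, v := it.At()
		_, _ = t, v // consume the sample
	}
}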
type ChunkIterable interface {
// Iterator returns a new, independent iterator that iterates over potentially overlapping
// Iterator returns an iterator that iterates over potentially overlapping
// chunks of the series, sorted by min time.
Iterator() chunks.Iterator
Iterator(chunks.Iterator) chunks.Iterator
}
type Warnings []error

View file

@@ -425,12 +425,8 @@ func ChainedSeriesMerge(series ...Series) Series {
}
return &SeriesEntry{
Lset: series[0].Labels(),
SampleIteratorFn: func() chunkenc.Iterator {
iterators := make([]chunkenc.Iterator, 0, len(series))
for _, s := range series {
iterators = append(iterators, s.Iterator())
}
return NewChainSampleIterator(iterators)
SampleIteratorFn: func(it chunkenc.Iterator) chunkenc.Iterator {
return ChainSampleIteratorFromSeries(it, series)
},
}
}
@@ -446,15 +442,42 @@ type chainSampleIterator struct {
lastT int64
}
// NewChainSampleIterator returns a single iterator that iterates over the samples from the given iterators in a sorted
// fashion. If samples overlap, one sample from overlapped ones is kept (randomly) and all others with the same
// timestamp are dropped.
func NewChainSampleIterator(iterators []chunkenc.Iterator) chunkenc.Iterator {
return &chainSampleIterator{
iterators: iterators,
h: nil,
lastT: math.MinInt64,
// Return a chainSampleIterator initialized for length entries, re-using the memory from it if possible.
func getChainSampleIterator(it chunkenc.Iterator, length int) *chainSampleIterator {
csi, ok := it.(*chainSampleIterator)
if !ok {
csi = &chainSampleIterator{}
}
if cap(csi.iterators) < length {
csi.iterators = make([]chunkenc.Iterator, length)
} else {
csi.iterators = csi.iterators[:length]
}
csi.h = nil
csi.lastT = math.MinInt64
return csi
}
func ChainSampleIteratorFromSeries(it chunkenc.Iterator, series []Series) chunkenc.Iterator {
csi := getChainSampleIterator(it, len(series))
for i, s := range series {
csi.iterators[i] = s.Iterator(csi.iterators[i])
}
return csi
}
func ChainSampleIteratorFromMetas(it chunkenc.Iterator, chunks []chunks.Meta) chunkenc.Iterator {
csi := getChainSampleIterator(it, len(chunks))
for i, c := range chunks {
csi.iterators[i] = c.Chunk.Iterator(csi.iterators[i])
}
return csi
}
func ChainSampleIteratorFromIterators(it chunkenc.Iterator, iterators []chunkenc.Iterator) chunkenc.Iterator {
csi := getChainSampleIterator(it, 0)
csi.iterators = iterators
return csi
}
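All three constructors funnel into getChainSampleIterator, so the same chain iterator can be recycled across calls. A sketch from inside the package, where seriesA and seriesB are assumed Series values:

var it chunkenc.Iterator
// First call allocates; overlapping samples are deduplicated by timestamp.
it = ChainSampleIteratorFromSeries(it, []Series{seriesA, seriesB})
for typ := it.Next(); typ != chunkenc.ValNone; typ = it.Next() {
	// Read via it.At(), it.AtHistogram() or it.AtFloatHistogram(), per typ.
}
// Passing it back re-uses the struct and its iterators slice.
it = ChainSampleIteratorFromSeries(it, []Series{seriesA})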
func (c *chainSampleIterator) Seek(t int64) chunkenc.ValueType {
@ -607,10 +630,10 @@ func NewCompactingChunkSeriesMerger(mergeFunc VerticalSeriesMergeFunc) VerticalC
}
return &ChunkSeriesEntry{
Lset: series[0].Labels(),
ChunkIteratorFn: func() chunks.Iterator {
ChunkIteratorFn: func(chunks.Iterator) chunks.Iterator {
iterators := make([]chunks.Iterator, 0, len(series))
for _, s := range series {
iterators = append(iterators, s.Iterator())
iterators = append(iterators, s.Iterator(nil))
}
return &compactChunkIterator{
mergeFunc: mergeFunc,
@ -676,7 +699,7 @@ func (c *compactChunkIterator) Next() bool {
// 1:1 duplicates, skip it.
} else {
// We operate on the same series, so labels do not matter here.
overlapping = append(overlapping, newChunkToSeriesDecoder(nil, next))
overlapping = append(overlapping, newChunkToSeriesDecoder(labels.EmptyLabels(), next))
if next.MaxTime > oMaxTime {
oMaxTime = next.MaxTime
}
@ -693,7 +716,7 @@ func (c *compactChunkIterator) Next() bool {
}
// Add last as it's not yet included in overlap. We operate on the same series, so labels do not matter here.
iter = NewSeriesToChunkEncoder(c.mergeFunc(append(overlapping, newChunkToSeriesDecoder(nil, c.curr))...)).Iterator()
iter = NewSeriesToChunkEncoder(c.mergeFunc(append(overlapping, newChunkToSeriesDecoder(labels.EmptyLabels(), c.curr))...)).Iterator(nil)
if !iter.Next() {
if c.err = iter.Err(); c.err != nil {
return false
@ -751,10 +774,10 @@ func NewConcatenatingChunkSeriesMerger() VerticalChunkSeriesMergeFunc {
}
return &ChunkSeriesEntry{
Lset: series[0].Labels(),
ChunkIteratorFn: func() chunks.Iterator {
ChunkIteratorFn: func(chunks.Iterator) chunks.Iterator {
iterators := make([]chunks.Iterator, 0, len(series))
for _, s := range series {
iterators = append(iterators, s.Iterator())
iterators = append(iterators, s.Iterator(nil))
}
return &concatenatingChunkIterator{
iterators: iterators,

View file

@ -202,8 +202,8 @@ func TestMergeQuerierWithChainMerger(t *testing.T) {
expectedSeries := tc.expected.At()
require.Equal(t, expectedSeries.Labels(), actualSeries.Labels())
expSmpl, expErr := ExpandSamples(expectedSeries.Iterator(), nil)
actSmpl, actErr := ExpandSamples(actualSeries.Iterator(), nil)
expSmpl, expErr := ExpandSamples(expectedSeries.Iterator(nil), nil)
actSmpl, actErr := ExpandSamples(actualSeries.Iterator(nil), nil)
require.Equal(t, expErr, actErr)
require.Equal(t, expSmpl, actSmpl)
}
@ -370,8 +370,8 @@ func TestMergeChunkQuerierWithNoVerticalChunkSeriesMerger(t *testing.T) {
expectedSeries := tc.expected.At()
require.Equal(t, expectedSeries.Labels(), actualSeries.Labels())
expChks, expErr := ExpandChunks(expectedSeries.Iterator())
actChks, actErr := ExpandChunks(actualSeries.Iterator())
expChks, expErr := ExpandChunks(expectedSeries.Iterator(nil))
actChks, actErr := ExpandChunks(actualSeries.Iterator(nil))
require.Equal(t, expErr, actErr)
require.Equal(t, expChks, actChks)
@ -533,8 +533,8 @@ func TestCompactingChunkSeriesMerger(t *testing.T) {
t.Run(tc.name, func(t *testing.T) {
merged := m(tc.input...)
require.Equal(t, tc.expected.Labels(), merged.Labels())
actChks, actErr := ExpandChunks(merged.Iterator())
expChks, expErr := ExpandChunks(tc.expected.Iterator())
actChks, actErr := ExpandChunks(merged.Iterator(nil))
expChks, expErr := ExpandChunks(tc.expected.Iterator(nil))
require.Equal(t, expErr, actErr)
require.Equal(t, expChks, actChks)
@ -667,8 +667,8 @@ func TestConcatenatingChunkSeriesMerger(t *testing.T) {
t.Run(tc.name, func(t *testing.T) {
merged := m(tc.input...)
require.Equal(t, tc.expected.Labels(), merged.Labels())
actChks, actErr := ExpandChunks(merged.Iterator())
expChks, expErr := ExpandChunks(tc.expected.Iterator())
actChks, actErr := ExpandChunks(merged.Iterator(nil))
expChks, expErr := ExpandChunks(tc.expected.Iterator(nil))
require.Equal(t, expErr, actErr)
require.Equal(t, expChks, actChks)
@ -809,7 +809,7 @@ func TestChainSampleIterator(t *testing.T) {
expected: []tsdbutil.Sample{sample{0, 0, nil, nil}, sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}},
},
} {
merged := NewChainSampleIterator(tc.input)
merged := ChainSampleIteratorFromIterators(nil, tc.input)
actual, err := ExpandSamples(merged, nil)
require.NoError(t, err)
require.Equal(t, tc.expected, actual)
@ -855,7 +855,7 @@ func TestChainSampleIteratorSeek(t *testing.T) {
expected: []tsdbutil.Sample{sample{0, 0, nil, nil}, sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}},
},
} {
merged := NewChainSampleIterator(tc.input)
merged := ChainSampleIteratorFromIterators(nil, tc.input)
actual := []tsdbutil.Sample{}
if merged.Seek(tc.seek) == chunkenc.ValFloat {
t, v := merged.At()
@ -868,9 +868,7 @@ func TestChainSampleIteratorSeek(t *testing.T) {
}
}
var result []tsdbutil.Sample
func makeSeriesSet(numSeries, numSamples int) SeriesSet {
func makeSeries(numSeries, numSamples int) []Series {
series := []Series{}
for j := 0; j < numSeries; j++ {
labels := labels.FromStrings("foo", fmt.Sprintf("bar%d", j))
@ -880,30 +878,39 @@ func makeSeriesSet(numSeries, numSamples int) SeriesSet {
}
series = append(series, NewListSeries(labels, samples))
}
return NewMockSeriesSet(series...)
return series
}
func makeMergeSeriesSet(numSeriesSets, numSeries, numSamples int) SeriesSet {
seriesSets := []genericSeriesSet{}
for i := 0; i < numSeriesSets; i++ {
seriesSets = append(seriesSets, &genericSeriesSetAdapter{makeSeriesSet(numSeries, numSamples)})
func makeMergeSeriesSet(serieses [][]Series) SeriesSet {
seriesSets := make([]genericSeriesSet, len(serieses))
for i, s := range serieses {
seriesSets[i] = &genericSeriesSetAdapter{NewMockSeriesSet(s...)}
}
return &seriesSetAdapter{newGenericMergeSeriesSet(seriesSets, (&seriesMergerAdapter{VerticalSeriesMergeFunc: ChainedSeriesMerge}).Merge)}
}
func benchmarkDrain(seriesSet SeriesSet, b *testing.B) {
func benchmarkDrain(b *testing.B, makeSeriesSet func() SeriesSet) {
var err error
var t int64
var v float64
var iter chunkenc.Iterator
for n := 0; n < b.N; n++ {
seriesSet := makeSeriesSet()
for seriesSet.Next() {
result, err = ExpandSamples(seriesSet.At().Iterator(), nil)
require.NoError(b, err)
iter = seriesSet.At().Iterator(iter)
for iter.Next() == chunkenc.ValFloat {
t, v = iter.At()
}
err = iter.Err()
}
require.NoError(b, err)
require.NotEqual(b, t, v) // To ensure the inner loop doesn't get optimised away.
}
}
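The outer-scope t and v plus the final require.NotEqual act as a sink so the compiler cannot discard the drain loop. The same idea in isolation, with a hypothetical newIter constructor:

var sinkT int64
var sinkV float64

func BenchmarkDrainSketch(b *testing.B) {
	for n := 0; n < b.N; n++ {
		it := newIter() // hypothetical iterator constructor
		for it.Next() == chunkenc.ValFloat {
			sinkT, sinkV = it.At() // results escape the loop body
		}
	}
	_, _ = sinkT, sinkV // keep the sink observable
}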
func BenchmarkNoMergeSeriesSet_100_100(b *testing.B) {
seriesSet := makeSeriesSet(100, 100)
benchmarkDrain(seriesSet, b)
series := makeSeries(100, 100)
benchmarkDrain(b, func() SeriesSet { return NewMockSeriesSet(series...) })
}
func BenchmarkMergeSeriesSet(b *testing.B) {
@ -914,9 +921,12 @@ func BenchmarkMergeSeriesSet(b *testing.B) {
{10, 100, 100},
{100, 100, 100},
} {
seriesSet := makeMergeSeriesSet(bm.numSeriesSets, bm.numSeries, bm.numSamples)
serieses := [][]Series{}
for i := 0; i < bm.numSeriesSets; i++ {
serieses = append(serieses, makeSeries(bm.numSeries, bm.numSamples))
}
b.Run(fmt.Sprintf("%d_%d_%d", bm.numSeriesSets, bm.numSeries, bm.numSamples), func(b *testing.B) {
benchmarkDrain(seriesSet, b)
benchmarkDrain(b, func() SeriesSet { return makeMergeSeriesSet(serieses) })
})
}
}

View file

@ -33,6 +33,7 @@ import (
"github.com/prometheus/prometheus/prompb"
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/tsdb/chunkenc"
"github.com/prometheus/prometheus/tsdb/chunks"
)
// decodeReadLimit is the maximum size of a read request body in bytes.
@ -115,9 +116,10 @@ func ToQuery(from, to int64, matchers []*labels.Matcher, hints *storage.SelectHi
func ToQueryResult(ss storage.SeriesSet, sampleLimit int) (*prompb.QueryResult, storage.Warnings, error) {
numSamples := 0
resp := &prompb.QueryResult{}
var iter chunkenc.Iterator
for ss.Next() {
series := ss.At()
iter := series.Iterator()
iter = series.Iterator(iter)
samples := []prompb.Sample{}
for iter.Next() == chunkenc.ValFloat {
@ -151,10 +153,10 @@ func ToQueryResult(ss storage.SeriesSet, sampleLimit int) (*prompb.QueryResult,
func FromQueryResult(sortSeries bool, res *prompb.QueryResult) storage.SeriesSet {
series := make([]storage.Series, 0, len(res.Timeseries))
for _, ts := range res.Timeseries {
lbls := labelProtosToLabels(ts.Labels)
if err := validateLabelsAndMetricName(lbls); err != nil {
if err := validateLabelsAndMetricName(ts.Labels); err != nil {
return errSeriesSet{err: err}
}
lbls := labelProtosToLabels(ts.Labels)
series = append(series, &concreteSeries{labels: lbls, samples: ts.Samples})
}
@ -199,17 +201,19 @@ func StreamChunkedReadResponses(
var (
chks []prompb.Chunk
lbls []prompb.Label
iter chunks.Iterator
)
for ss.Next() {
series := ss.At()
iter := series.Iterator()
iter = series.Iterator(iter)
lbls = MergeLabels(labelsToLabelsProto(series.Labels(), lbls), sortedExternalLabels)
frameBytesLeft := maxBytesInFrame
maxDataLength := maxBytesInFrame
for _, lbl := range lbls {
frameBytesLeft -= lbl.Size()
maxDataLength -= lbl.Size()
}
frameBytesLeft := maxDataLength
isNext := iter.Next()
@ -255,6 +259,7 @@ func StreamChunkedReadResponses(
// We immediately flush the Write() so it is safe to return to the pool.
marshalPool.Put(&b)
chks = chks[:0]
frameBytesLeft = maxDataLength
}
if err := iter.Err(); err != nil {
return ss.Warnings(), err
@ -343,10 +348,14 @@ type concreteSeries struct {
}
func (c *concreteSeries) Labels() labels.Labels {
return labels.New(c.labels...)
return c.labels.Copy()
}
func (c *concreteSeries) Iterator() chunkenc.Iterator {
func (c *concreteSeries) Iterator(it chunkenc.Iterator) chunkenc.Iterator {
if csi, ok := it.(*concreteSeriesIterator); ok {
csi.reset(c)
return csi
}
return newConcreteSeriersIterator(c)
}
@ -363,6 +372,11 @@ func newConcreteSeriersIterator(series *concreteSeries) chunkenc.Iterator {
}
}
func (c *concreteSeriesIterator) reset(series *concreteSeries) {
c.cur = -1
c.series = series
}
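The type-assert-then-reset shape above generalises to any series implementation. A sketch with hypothetical mySeries and mySeriesIterator types:

func (s *mySeries) Iterator(it chunkenc.Iterator) chunkenc.Iterator {
	if i, ok := it.(*mySeriesIterator); ok {
		i.reset(s) // recycle the caller's iterator in place
		return i
	}
	return newMySeriesIterator(s) // otherwise fall back to a fresh allocation
}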
// Seek implements storage.SeriesIterator.
func (c *concreteSeriesIterator) Seek(t int64) chunkenc.ValueType {
if c.cur == -1 {
@ -429,7 +443,7 @@ func (c *concreteSeriesIterator) Err() error {
// validateLabelsAndMetricName validates the label names/values and metric names returned from remote read,
// also making sure that there are no labels with duplicate names
func validateLabelsAndMetricName(ls labels.Labels) error {
func validateLabelsAndMetricName(ls []prompb.Label) error {
for i, l := range ls {
if l.Name == labels.MetricName && !model.IsValidMetricName(model.LabelValue(l.Value)) {
return fmt.Errorf("invalid metric name: %v", l.Value)
@ -569,30 +583,24 @@ func LabelProtosToMetric(labelPairs []*prompb.Label) model.Metric {
}
func labelProtosToLabels(labelPairs []prompb.Label) labels.Labels {
result := make(labels.Labels, 0, len(labelPairs))
b := labels.ScratchBuilder{}
for _, l := range labelPairs {
result = append(result, labels.Label{
Name: l.Name,
Value: l.Value,
})
b.Add(l.Name, l.Value)
}
sort.Sort(result)
return result
b.Sort()
return b.Labels()
}
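labels.ScratchBuilder, used above, replaces hand-building a sorted slice: add pairs in any order, sort once, then materialise. A minimal sketch:

b := labels.ScratchBuilder{}
b.Add("job", "api")   // Add order does not matter
b.Add("__name__", "up")
b.Sort()              // must precede Labels()
lbls := b.Labels()    // same result as labels.FromStrings("__name__", "up", "job", "api")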
// labelsToLabelsProto transforms labels into prompb labels. The buffer slice
// will be used to avoid allocations if it is big enough to store the labels.
func labelsToLabelsProto(labels labels.Labels, buf []prompb.Label) []prompb.Label {
func labelsToLabelsProto(lbls labels.Labels, buf []prompb.Label) []prompb.Label {
result := buf[:0]
if cap(buf) < len(labels) {
result = make([]prompb.Label, 0, len(labels))
}
for _, l := range labels {
lbls.Range(func(l labels.Label) {
result = append(result, prompb.Label{
Name: l.Name,
Value: l.Value,
})
}
})
return result
}

View file

@ -16,8 +16,10 @@ package remote
import (
"bytes"
"fmt"
"sync"
"testing"
"github.com/gogo/protobuf/proto"
"github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/model/histogram"
@ -26,6 +28,7 @@ import (
"github.com/prometheus/prometheus/prompb"
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/tsdb/chunkenc"
"github.com/prometheus/prometheus/tsdb/chunks"
)
var testHistogram = histogram.Histogram{
@ -71,86 +74,86 @@ var writeRequestFixture = &prompb.WriteRequest{
func TestValidateLabelsAndMetricName(t *testing.T) {
tests := []struct {
input labels.Labels
input []prompb.Label
expectedErr string
description string
}{
{
input: labels.FromStrings(
"__name__", "name",
"labelName", "labelValue",
),
input: []prompb.Label{
{Name: "__name__", Value: "name"},
{Name: "labelName", Value: "labelValue"},
},
expectedErr: "",
description: "regular labels",
},
{
input: labels.FromStrings(
"__name__", "name",
"_labelName", "labelValue",
),
input: []prompb.Label{
{Name: "__name__", Value: "name"},
{Name: "_labelName", Value: "labelValue"},
},
expectedErr: "",
description: "label name with _",
},
{
input: labels.FromStrings(
"__name__", "name",
"@labelName", "labelValue",
),
input: []prompb.Label{
{Name: "__name__", Value: "name"},
{Name: "@labelName", Value: "labelValue"},
},
expectedErr: "invalid label name: @labelName",
description: "label name with @",
},
{
input: labels.FromStrings(
"__name__", "name",
"123labelName", "labelValue",
),
input: []prompb.Label{
{Name: "__name__", Value: "name"},
{Name: "123labelName", Value: "labelValue"},
},
expectedErr: "invalid label name: 123labelName",
description: "label name starts with numbers",
},
{
input: labels.FromStrings(
"__name__", "name",
"", "labelValue",
),
input: []prompb.Label{
{Name: "__name__", Value: "name"},
{Name: "", Value: "labelValue"},
},
expectedErr: "invalid label name: ",
description: "label name is empty string",
},
{
input: labels.FromStrings(
"__name__", "name",
"labelName", string([]byte{0xff}),
),
input: []prompb.Label{
{Name: "__name__", Value: "name"},
{Name: "labelName", Value: string([]byte{0xff})},
},
expectedErr: "invalid label value: " + string([]byte{0xff}),
description: "label value is an invalid UTF-8 value",
},
{
input: labels.FromStrings(
"__name__", "@invalid_name",
),
input: []prompb.Label{
{Name: "__name__", Value: "@invalid_name"},
},
expectedErr: "invalid metric name: @invalid_name",
description: "metric name starts with @",
},
{
input: labels.FromStrings(
"__name__", "name1",
"__name__", "name2",
),
input: []prompb.Label{
{Name: "__name__", Value: "name1"},
{Name: "__name__", Value: "name2"},
},
expectedErr: "duplicate label with name: __name__",
description: "duplicate label names",
},
{
input: labels.FromStrings(
"label1", "name",
"label2", "name",
),
input: []prompb.Label{
{Name: "label1", Value: "name"},
{Name: "label2", Value: "name"},
},
expectedErr: "",
description: "duplicate label values",
},
{
input: labels.FromStrings(
"", "name",
"label2", "name",
),
input: []prompb.Label{
{Name: "", Value: "name"},
{Name: "label2", Value: "name"},
},
expectedErr: "invalid label name: ",
description: "don't report as duplicate label name",
},
@ -197,8 +200,7 @@ func TestConcreteSeriesClonesLabels(t *testing.T) {
gotLabels := cs.Labels()
require.Equal(t, lbls, gotLabels)
gotLabels[0].Value = "foo"
gotLabels[1].Value = "bar"
gotLabels.CopyFrom(labels.FromStrings("a", "foo", "c", "foo"))
gotLabels = cs.Labels()
require.Equal(t, lbls, gotLabels)
@ -215,7 +217,7 @@ func TestConcreteSeriesIterator(t *testing.T) {
{Value: 4, Timestamp: 4},
},
}
it := series.Iterator()
it := series.Iterator(nil)
// Seek to the first sample with ts=1.
require.Equal(t, chunkenc.ValFloat, it.Seek(1))
@ -367,3 +369,119 @@ func TestNilHistogramProto(t *testing.T) {
// values, causing the test to fail.
HistogramProtoToHistogram(prompb.Histogram{})
}
func TestStreamResponse(t *testing.T) {
lbs1 := labelsToLabelsProto(labels.FromStrings("instance", "localhost1", "job", "demo1"), nil)
lbs2 := labelsToLabelsProto(labels.FromStrings("instance", "localhost2", "job", "demo2"), nil)
chunk := prompb.Chunk{
Type: prompb.Chunk_XOR,
Data: make([]byte, 100),
}
lbSize, chunkSize := 0, chunk.Size()
for _, lb := range lbs1 {
lbSize += lb.Size()
}
maxBytesInFrame := lbSize + chunkSize*2
testData := []*prompb.ChunkedSeries{{
Labels: lbs1,
Chunks: []prompb.Chunk{chunk, chunk, chunk, chunk},
}, {
Labels: lbs2,
Chunks: []prompb.Chunk{chunk, chunk, chunk, chunk},
}}
css := newMockChunkSeriesSet(testData)
writer := mockWriter{}
warning, err := StreamChunkedReadResponses(&writer, 0,
css,
nil,
maxBytesInFrame,
&sync.Pool{})
require.Nil(t, warning)
require.Nil(t, err)
expectData := []*prompb.ChunkedSeries{{
Labels: lbs1,
Chunks: []prompb.Chunk{chunk, chunk},
}, {
Labels: lbs1,
Chunks: []prompb.Chunk{chunk, chunk},
}, {
Labels: lbs2,
Chunks: []prompb.Chunk{chunk, chunk},
}, {
Labels: lbs2,
Chunks: []prompb.Chunk{chunk, chunk},
}}
require.Equal(t, expectData, writer.actual)
}
type mockWriter struct {
actual []*prompb.ChunkedSeries
}
func (m *mockWriter) Write(p []byte) (n int, err error) {
cr := &prompb.ChunkedReadResponse{}
if err := proto.Unmarshal(p, cr); err != nil {
return 0, fmt.Errorf("unmarshaling: %w", err)
}
m.actual = append(m.actual, cr.ChunkedSeries...)
return len(p), nil
}
type mockChunkSeriesSet struct {
chunkedSeries []*prompb.ChunkedSeries
index int
}
func newMockChunkSeriesSet(ss []*prompb.ChunkedSeries) storage.ChunkSeriesSet {
return &mockChunkSeriesSet{chunkedSeries: ss, index: -1}
}
func (c *mockChunkSeriesSet) Next() bool {
c.index++
return c.index < len(c.chunkedSeries)
}
func (c *mockChunkSeriesSet) At() storage.ChunkSeries {
return &storage.ChunkSeriesEntry{
Lset: labelProtosToLabels(c.chunkedSeries[c.index].Labels),
ChunkIteratorFn: func(chunks.Iterator) chunks.Iterator {
return &mockChunkIterator{
chunks: c.chunkedSeries[c.index].Chunks,
index: -1,
}
},
}
}
func (c *mockChunkSeriesSet) Warnings() storage.Warnings { return nil }
func (c *mockChunkSeriesSet) Err() error {
return nil
}
type mockChunkIterator struct {
chunks []prompb.Chunk
index int
}
func (c *mockChunkIterator) At() chunks.Meta {
one := c.chunks[c.index]
chunk, err := chunkenc.FromData(chunkenc.Encoding(one.Type), one.Data)
if err != nil {
panic(err)
}
return chunks.Meta{
Chunk: chunk,
MinTime: one.MinTimeMs,
MaxTime: one.MaxTimeMs,
}
}
func (c *mockChunkIterator) Next() bool {
c.index++
return c.index < len(c.chunks)
}
func (c *mockChunkIterator) Err() error {
return nil
}

View file

@ -396,7 +396,7 @@ type QueueManager struct {
flushDeadline time.Duration
cfg config.QueueConfig
mcfg config.MetadataConfig
externalLabels labels.Labels
externalLabels []labels.Label
relabelConfigs []*relabel.Config
sendExemplars bool
sendNativeHistograms bool
@ -454,13 +454,19 @@ func NewQueueManager(
logger = log.NewNopLogger()
}
// Copy externalLabels into slice which we need for processExternalLabels.
extLabelsSlice := make([]labels.Label, 0, externalLabels.Len())
externalLabels.Range(func(l labels.Label) {
extLabelsSlice = append(extLabelsSlice, l)
})
logger = log.With(logger, remoteName, client.Name(), endpoint, client.Endpoint())
t := &QueueManager{
logger: logger,
flushDeadline: flushDeadline,
cfg: cfg,
mcfg: mCfg,
externalLabels: externalLabels,
externalLabels: extLabelsSlice,
relabelConfigs: relabelConfigs,
storeClient: client,
sendExemplars: enableExemplarRemoteWrite,
@ -769,8 +775,8 @@ func (t *QueueManager) StoreSeries(series []record.RefSeries, index int) {
t.seriesSegmentIndexes[s.Ref] = index
ls := processExternalLabels(s.Labels, t.externalLabels)
lbls := relabel.Process(ls, t.relabelConfigs...)
if len(lbls) == 0 {
lbls, keep := relabel.Process(ls, t.relabelConfigs...)
if !keep || lbls.IsEmpty() {
t.droppedSeries[s.Ref] = struct{}{}
continue
}
@ -831,44 +837,33 @@ func (t *QueueManager) client() WriteClient {
}
func (t *QueueManager) internLabels(lbls labels.Labels) {
for i, l := range lbls {
lbls[i].Name = t.interner.intern(l.Name)
lbls[i].Value = t.interner.intern(l.Value)
}
lbls.InternStrings(t.interner.intern)
}
func (t *QueueManager) releaseLabels(ls labels.Labels) {
for _, l := range ls {
t.interner.release(l.Name)
t.interner.release(l.Value)
}
ls.ReleaseStrings(t.interner.release)
}
// processExternalLabels merges externalLabels into ls. If ls contains
// a label in externalLabels, the value in ls wins.
func processExternalLabels(ls, externalLabels labels.Labels) labels.Labels {
i, j, result := 0, 0, make(labels.Labels, 0, len(ls)+len(externalLabels))
for i < len(ls) && j < len(externalLabels) {
if ls[i].Name < externalLabels[j].Name {
result = append(result, labels.Label{
Name: ls[i].Name,
Value: ls[i].Value,
})
i++
} else if ls[i].Name > externalLabels[j].Name {
result = append(result, externalLabels[j])
j++
} else {
result = append(result, labels.Label{
Name: ls[i].Name,
Value: ls[i].Value,
})
i++
func processExternalLabels(ls labels.Labels, externalLabels []labels.Label) labels.Labels {
b := labels.NewScratchBuilder(ls.Len() + len(externalLabels))
j := 0
ls.Range(func(l labels.Label) {
for j < len(externalLabels) && l.Name > externalLabels[j].Name {
b.Add(externalLabels[j].Name, externalLabels[j].Value)
j++
}
if j < len(externalLabels) && l.Name == externalLabels[j].Name {
j++
}
b.Add(l.Name, l.Value)
})
for ; j < len(externalLabels); j++ {
b.Add(externalLabels[j].Name, externalLabels[j].Value)
}
return append(append(result, ls[i:]...), externalLabels[j:]...)
return b.Labels()
}
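The merge keeps the output sorted, the series label wins on a name clash, and leftover external labels are appended. One illustrative case, mirroring the table-driven tests further down:

ls := labels.FromStrings("a", "b", "c", "d")
ext := []labels.Label{{Name: "a", Value: "xxx"}, {Name: "e", Value: "f"}}
got := processExternalLabels(ls, ext)
// got == labels.FromStrings("a", "b", "c", "d", "e", "f"); "a" kept from ls.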
func (t *QueueManager) updateShardsLoop() {

View file

@ -161,7 +161,7 @@ func TestMetadataDelivery(t *testing.T) {
mcfg := config.DefaultMetadataConfig
metrics := newQueueManagerMetrics(nil, "", "")
m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, nil, nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false)
m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false)
m.Start()
defer m.Stop()
@ -539,6 +539,7 @@ func TestShouldReshard(t *testing.T) {
func createTimeseries(numSamples, numSeries int, extraLabels ...labels.Label) ([]record.RefSample, []record.RefSeries) {
samples := make([]record.RefSample, 0, numSamples)
series := make([]record.RefSeries, 0, numSeries)
b := labels.ScratchBuilder{}
for i := 0; i < numSeries; i++ {
name := fmt.Sprintf("test_metric_%d", i)
for j := 0; j < numSamples; j++ {
@ -548,9 +549,16 @@ func createTimeseries(numSamples, numSeries int, extraLabels ...labels.Label) ([
V: float64(i),
})
}
// Create labels consisting of the series name plus any extra labels supplied.
b.Reset()
b.Add(labels.MetricName, name)
for _, l := range extraLabels {
b.Add(l.Name, l.Value)
}
b.Sort()
series = append(series, record.RefSeries{
Ref: chunks.HeadSeriesRef(i),
Labels: append(labels.Labels{{Name: "__name__", Value: name}}, extraLabels...),
Labels: b.Labels(),
})
}
return samples, series
@ -603,7 +611,7 @@ func createHistograms(numSamples, numSeries int) ([]record.RefHistogramSample, [
}
series = append(series, record.RefSeries{
Ref: chunks.HeadSeriesRef(i),
Labels: labels.Labels{{Name: "__name__", Value: name}},
Labels: labels.FromStrings("__name__", name),
})
}
return histograms, series
@ -815,7 +823,7 @@ func BenchmarkSampleSend(b *testing.B) {
const numSeries = 10000
// Extra labels to make a more realistic workload - taken from Kubernetes' embedded cAdvisor metrics.
extraLabels := labels.Labels{
extraLabels := []labels.Label{
{Name: "kubernetes_io_arch", Value: "amd64"},
{Name: "kubernetes_io_instance_type", Value: "c3.somesize"},
{Name: "kubernetes_io_os", Value: "linux"},
@ -902,56 +910,63 @@ func BenchmarkStartup(b *testing.B) {
func TestProcessExternalLabels(t *testing.T) {
for _, tc := range []struct {
labels labels.Labels
externalLabels labels.Labels
externalLabels []labels.Label
expected labels.Labels
}{
// Test adding labels at the end.
{
labels: labels.Labels{{Name: "a", Value: "b"}},
externalLabels: labels.Labels{{Name: "c", Value: "d"}},
expected: labels.Labels{{Name: "a", Value: "b"}, {Name: "c", Value: "d"}},
labels: labels.FromStrings("a", "b"),
externalLabels: []labels.Label{{Name: "c", Value: "d"}},
expected: labels.FromStrings("a", "b", "c", "d"),
},
// Test adding labels at the beginning.
{
labels: labels.Labels{{Name: "c", Value: "d"}},
externalLabels: labels.Labels{{Name: "a", Value: "b"}},
expected: labels.Labels{{Name: "a", Value: "b"}, {Name: "c", Value: "d"}},
labels: labels.FromStrings("c", "d"),
externalLabels: []labels.Label{{Name: "a", Value: "b"}},
expected: labels.FromStrings("a", "b", "c", "d"),
},
// Test we don't override existing labels.
{
labels: labels.Labels{{Name: "a", Value: "b"}},
externalLabels: labels.Labels{{Name: "a", Value: "c"}},
expected: labels.Labels{{Name: "a", Value: "b"}},
labels: labels.FromStrings("a", "b"),
externalLabels: []labels.Label{{Name: "a", Value: "c"}},
expected: labels.FromStrings("a", "b"),
},
// Test empty externalLabels.
{
labels: labels.Labels{{Name: "a", Value: "b"}},
externalLabels: labels.Labels{},
expected: labels.Labels{{Name: "a", Value: "b"}},
labels: labels.FromStrings("a", "b"),
externalLabels: []labels.Label{},
expected: labels.FromStrings("a", "b"),
},
// Test empty labels.
{
labels: labels.Labels{},
externalLabels: labels.Labels{{Name: "a", Value: "b"}},
expected: labels.Labels{{Name: "a", Value: "b"}},
labels: labels.EmptyLabels(),
externalLabels: []labels.Label{{Name: "a", Value: "b"}},
expected: labels.FromStrings("a", "b"),
},
// Test labels is longer than externalLabels.
{
labels: labels.Labels{{Name: "a", Value: "b"}, {Name: "c", Value: "d"}},
externalLabels: labels.Labels{{Name: "e", Value: "f"}},
expected: labels.Labels{{Name: "a", Value: "b"}, {Name: "c", Value: "d"}, {Name: "e", Value: "f"}},
labels: labels.FromStrings("a", "b", "c", "d"),
externalLabels: []labels.Label{{Name: "e", Value: "f"}},
expected: labels.FromStrings("a", "b", "c", "d", "e", "f"),
},
// Test externalLabels is longer than labels.
{
labels: labels.Labels{{Name: "c", Value: "d"}},
externalLabels: labels.Labels{{Name: "a", Value: "b"}, {Name: "e", Value: "f"}},
expected: labels.Labels{{Name: "a", Value: "b"}, {Name: "c", Value: "d"}, {Name: "e", Value: "f"}},
labels: labels.FromStrings("c", "d"),
externalLabels: []labels.Label{{Name: "a", Value: "b"}, {Name: "e", Value: "f"}},
expected: labels.FromStrings("a", "b", "c", "d", "e", "f"),
},
// Adding with and without clashing labels.
{
labels: labels.FromStrings("a", "b", "c", "d"),
externalLabels: []labels.Label{{Name: "a", Value: "xxx"}, {Name: "c", Value: "yyy"}, {Name: "e", Value: "f"}},
expected: labels.FromStrings("a", "b", "c", "d", "e", "f"),
},
} {
require.Equal(t, tc.expected, processExternalLabels(tc.labels, tc.externalLabels))

View file

@ -180,9 +180,11 @@ func (q *querier) Select(sortSeries bool, hints *storage.SelectHints, matchers .
// We return the new set of matchers, along with the list of label names for which
// matchers were added, so that these can later be removed from the result
// time series again.
func (q querier) addExternalLabels(ms []*labels.Matcher) ([]*labels.Matcher, labels.Labels) {
el := make(labels.Labels, len(q.externalLabels))
copy(el, q.externalLabels)
func (q querier) addExternalLabels(ms []*labels.Matcher) ([]*labels.Matcher, []string) {
el := make([]labels.Label, 0, q.externalLabels.Len())
q.externalLabels.Range(func(l labels.Label) {
el = append(el, l)
})
// ms won't be sorted, so have to O(n^2) the search.
for _, m := range ms {
@ -202,7 +204,11 @@ func (q querier) addExternalLabels(ms []*labels.Matcher) ([]*labels.Matcher, lab
}
ms = append(ms, m)
}
return ms, el
names := make([]string, len(el))
for i := range el {
names[i] = el[i].Name
}
return ms, names
}
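With labels.Labels becoming opaque across this change set, index loops give way to Range callbacks as above. A small sketch collecting label names:

names := make([]string, 0, lbls.Len())
lbls.Range(func(l labels.Label) {
	names = append(names, l.Name)
})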
// LabelValues implements storage.Querier and is a noop.
@ -234,7 +240,8 @@ func (q *chunkQuerier) Select(sortSeries bool, hints *storage.SelectHints, match
return storage.NewSeriesSetToChunkSet(q.querier.Select(sortSeries, hints, matchers...))
}
func newSeriesSetFilter(ss storage.SeriesSet, toFilter labels.Labels) storage.SeriesSet {
// Note strings in toFilter must be sorted.
func newSeriesSetFilter(ss storage.SeriesSet, toFilter []string) storage.SeriesSet {
return &seriesSetFilter{
SeriesSet: ss,
toFilter: toFilter,
@ -243,7 +250,7 @@ func newSeriesSetFilter(ss storage.SeriesSet, toFilter labels.Labels) storage.Se
type seriesSetFilter struct {
storage.SeriesSet
toFilter labels.Labels
toFilter []string // Label names to remove from result
querier storage.Querier
}
@ -264,20 +271,12 @@ func (ssf seriesSetFilter) At() storage.Series {
type seriesFilter struct {
storage.Series
toFilter labels.Labels
toFilter []string // Label names to remove from result
}
func (sf seriesFilter) Labels() labels.Labels {
labels := sf.Series.Labels()
for i, j := 0, 0; i < len(labels) && j < len(sf.toFilter); {
if labels[i].Name < sf.toFilter[j].Name {
i++
} else if labels[i].Name > sf.toFilter[j].Name {
j++
} else {
labels = labels[:i+copy(labels[i:], labels[i+1:])]
j++
}
}
return labels
b := labels.NewBuilder(sf.Series.Labels())
// TODO: check if this is too inefficient.
b.Del(sf.toFilter...)
return b.Labels(labels.EmptyLabels())
}
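A sketch of the builder-based filtering above; the Labels(labels.EmptyLabels()) form matches the signature this diff itself uses:

b := labels.NewBuilder(labels.FromStrings("a", "1", "dc", "x", "region", "y"))
b.Del("dc", "region") // drop the filtered label names
out := b.Labels(labels.EmptyLabels())
// out == labels.FromStrings("a", "1")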

View file

@ -110,7 +110,7 @@ func TestExternalLabelsQuerierAddExternalLabels(t *testing.T) {
el labels.Labels
inMatchers []*labels.Matcher
outMatchers []*labels.Matcher
added labels.Labels
added []string
}{
{
inMatchers: []*labels.Matcher{
@ -119,7 +119,7 @@ func TestExternalLabelsQuerierAddExternalLabels(t *testing.T) {
outMatchers: []*labels.Matcher{
labels.MustNewMatcher(labels.MatchEqual, "job", "api-server"),
},
added: labels.Labels{},
added: []string{},
},
{
el: labels.FromStrings("dc", "berlin-01", "region", "europe"),
@ -131,7 +131,7 @@ func TestExternalLabelsQuerierAddExternalLabels(t *testing.T) {
labels.MustNewMatcher(labels.MatchEqual, "region", "europe"),
labels.MustNewMatcher(labels.MatchEqual, "dc", "berlin-01"),
},
added: labels.FromStrings("dc", "berlin-01", "region", "europe"),
added: []string{"dc", "region"},
},
{
el: labels.FromStrings("dc", "berlin-01", "region", "europe"),
@ -144,7 +144,7 @@ func TestExternalLabelsQuerierAddExternalLabels(t *testing.T) {
labels.MustNewMatcher(labels.MatchEqual, "region", "europe"),
labels.MustNewMatcher(labels.MatchEqual, "dc", "munich-02"),
},
added: labels.FromStrings("region", "europe"),
added: []string{"region"},
},
}
@ -163,12 +163,12 @@ func TestExternalLabelsQuerierAddExternalLabels(t *testing.T) {
func TestSeriesSetFilter(t *testing.T) {
tests := []struct {
in *prompb.QueryResult
toRemove labels.Labels
toRemove []string
expected *prompb.QueryResult
}{
{
toRemove: labels.Labels{{Name: "foo", Value: "bar"}},
toRemove: []string{"foo"},
in: &prompb.QueryResult{
Timeseries: []*prompb.TimeSeries{
{Labels: labelsToLabelsProto(labels.FromStrings("foo", "bar", "a", "b"), nil), Samples: []prompb.Sample{}},

View file

@ -91,7 +91,7 @@ func TestFilterExternalLabels(t *testing.T) {
require.NoError(t, s.ApplyConfig(conf))
require.Equal(t, 1, len(s.queryables))
require.Equal(t, 1, len(s.queryables[0].(*sampleAndChunkQueryableClient).externalLabels))
require.Equal(t, 1, s.queryables[0].(*sampleAndChunkQueryableClient).externalLabels.Len())
err := s.Close()
require.NoError(t, err)
@ -118,7 +118,7 @@ func TestIgnoreExternalLabels(t *testing.T) {
require.NoError(t, s.ApplyConfig(conf))
require.Equal(t, 1, len(s.queryables))
require.Equal(t, 0, len(s.queryables[0].(*sampleAndChunkQueryableClient).externalLabels))
require.Equal(t, 0, s.queryables[0].(*sampleAndChunkQueryableClient).externalLabels.Len())
err := s.Close()
require.NoError(t, err)

View file

@ -278,7 +278,7 @@ func (t *timestampTracker) AppendExemplar(_ storage.SeriesRef, _ labels.Labels,
return 0, nil
}
func (t *timestampTracker) AppendHistogram(_ storage.SeriesRef, _ labels.Labels, ts int64, h *histogram.Histogram) (storage.SeriesRef, error) {
func (t *timestampTracker) AppendHistogram(_ storage.SeriesRef, _ labels.Labels, ts int64, _ *histogram.Histogram, _ *histogram.FloatHistogram) (storage.SeriesRef, error) {
t.histograms++
if ts > t.highestTimestamp {
t.highestTimestamp = ts

View file

@ -67,11 +67,14 @@ func (h *writeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
// checkAppendExemplarError modifies the error returned by AppendExemplar based on the error cause.
func (h *writeHandler) checkAppendExemplarError(err error, e exemplar.Exemplar, outOfOrderErrs *int) error {
unwrapedErr := errors.Unwrap(err)
unwrappedErr := errors.Unwrap(err)
if unwrappedErr == nil {
unwrappedErr = err
}
switch {
case errors.Is(unwrapedErr, storage.ErrNotFound):
case errors.Is(unwrappedErr, storage.ErrNotFound):
return storage.ErrNotFound
case errors.Is(unwrapedErr, storage.ErrOutOfOrderExemplar):
case errors.Is(unwrappedErr, storage.ErrOutOfOrderExemplar):
*outOfOrderErrs++
level.Debug(h.logger).Log("msg", "Out of order exemplar", "exemplar", fmt.Sprintf("%+v", e))
return nil
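The nil fallback matters because errors.Unwrap returns nil for an error with no wrapped cause, so matching only the unwrapped value would miss plain sentinel errors. A self-contained sketch of the pitfall:

package main

import (
	"errors"
	"fmt"
)

var errSentinel = errors.New("sentinel")

func main() {
	plain := errSentinel // no Unwrap chain at all
	wrapped := fmt.Errorf("context: %w", errSentinel)

	for _, err := range []error{plain, wrapped} {
		u := errors.Unwrap(err)
		if u == nil {
			u = err // fall back, otherwise the plain case is missed
		}
		fmt.Println(errors.Is(u, errSentinel)) // true, true
	}
}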
@ -98,8 +101,11 @@ func (h *writeHandler) write(ctx context.Context, req *prompb.WriteRequest) (err
for _, s := range ts.Samples {
_, err = app.Append(0, labels, s.Timestamp, s.Value)
if err != nil {
unwrapedErr := errors.Unwrap(err)
if errors.Is(unwrapedErr, storage.ErrOutOfOrderSample) || errors.Is(unwrapedErr, storage.ErrOutOfBounds) || errors.Is(unwrapedErr, storage.ErrDuplicateSampleForTimestamp) {
unwrappedErr := errors.Unwrap(err)
if unwrappedErr == nil {
unwrappedErr = err
}
if errors.Is(err, storage.ErrOutOfOrderSample) || errors.Is(unwrappedErr, storage.ErrOutOfBounds) || errors.Is(unwrappedErr, storage.ErrDuplicateSampleForTimestamp) {
level.Error(h.logger).Log("msg", "Out of order sample from remote write", "err", err.Error(), "series", labels.String(), "timestamp", s.Timestamp)
}
return err
@ -118,11 +124,15 @@ func (h *writeHandler) write(ctx context.Context, req *prompb.WriteRequest) (err
}
}
// TODO(codesome): support float histograms.
for _, hp := range ts.Histograms {
hs := HistogramProtoToHistogram(hp)
_, err = app.AppendHistogram(0, labels, hp.Timestamp, hs)
_, err = app.AppendHistogram(0, labels, hp.Timestamp, hs, nil)
if err != nil {
unwrappedErr := errors.Unwrap(err)
if unwrappedErr == nil {
unwrappedErr = err
}
// Although AppendHistogram does not currently return ErrDuplicateSampleForTimestamp, there is
// a note indicating its inclusion in the future.
if errors.Is(unwrappedErr, storage.ErrOutOfOrderSample) || errors.Is(unwrappedErr, storage.ErrOutOfBounds) || errors.Is(unwrappedErr, storage.ErrDuplicateSampleForTimestamp) {

View file

@ -67,7 +67,7 @@ func TestRemoteWriteHandler(t *testing.T) {
for _, hp := range ts.Histograms {
h := HistogramProtoToHistogram(hp)
require.Equal(t, mockHistogram{labels, hp.Timestamp, h}, appendable.histograms[k])
require.Equal(t, mockHistogram{labels, hp.Timestamp, h, nil}, appendable.histograms[k])
k++
}
}
@ -189,9 +189,10 @@ type mockExemplar struct {
}
type mockHistogram struct {
l labels.Labels
t int64
h *histogram.Histogram
l labels.Labels
t int64
h *histogram.Histogram
fh *histogram.FloatHistogram
}
func (m *mockAppendable) Appender(_ context.Context) storage.Appender {
@ -226,13 +227,13 @@ func (m *mockAppendable) AppendExemplar(_ storage.SeriesRef, l labels.Labels, e
return 0, nil
}
func (m *mockAppendable) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t int64, h *histogram.Histogram) (storage.SeriesRef, error) {
func (m *mockAppendable) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) {
if t < m.latestHistogram {
return 0, storage.ErrOutOfOrderSample
}
m.latestHistogram = t
m.histograms = append(m.histograms, mockHistogram{l, t, h})
m.histograms = append(m.histograms, mockHistogram{l, t, h, fh})
return 0, nil
}

View file

@ -228,14 +228,14 @@ func TestUpdateExternalLabels(t *testing.T) {
require.NoError(t, err)
require.NoError(t, s.ApplyConfig(conf))
require.Equal(t, 1, len(s.queues))
require.Equal(t, labels.Labels(nil), s.queues[hash].externalLabels)
require.Equal(t, 0, len(s.queues[hash].externalLabels))
conf.GlobalConfig.ExternalLabels = externalLabels
hash, err = toHash(conf.RemoteWriteConfigs[0])
require.NoError(t, err)
require.NoError(t, s.ApplyConfig(conf))
require.Equal(t, 1, len(s.queues))
require.Equal(t, externalLabels, s.queues[hash].externalLabels)
require.Equal(t, []labels.Label{{Name: "external", Value: "true"}}, s.queues[hash].externalLabels)
err = s.Close()
require.NoError(t, err)

View file

@ -27,26 +27,31 @@ import (
type SeriesEntry struct {
Lset labels.Labels
SampleIteratorFn func() chunkenc.Iterator
SampleIteratorFn func(chunkenc.Iterator) chunkenc.Iterator
}
func (s *SeriesEntry) Labels() labels.Labels { return s.Lset }
func (s *SeriesEntry) Iterator() chunkenc.Iterator { return s.SampleIteratorFn() }
func (s *SeriesEntry) Labels() labels.Labels { return s.Lset }
func (s *SeriesEntry) Iterator(it chunkenc.Iterator) chunkenc.Iterator { return s.SampleIteratorFn(it) }
type ChunkSeriesEntry struct {
Lset labels.Labels
ChunkIteratorFn func() chunks.Iterator
ChunkIteratorFn func(chunks.Iterator) chunks.Iterator
}
func (s *ChunkSeriesEntry) Labels() labels.Labels { return s.Lset }
func (s *ChunkSeriesEntry) Iterator() chunks.Iterator { return s.ChunkIteratorFn() }
func (s *ChunkSeriesEntry) Labels() labels.Labels { return s.Lset }
func (s *ChunkSeriesEntry) Iterator(it chunks.Iterator) chunks.Iterator { return s.ChunkIteratorFn(it) }
// NewListSeries returns series entry with iterator that allows to iterate over provided samples.
func NewListSeries(lset labels.Labels, s []tsdbutil.Sample) *SeriesEntry {
samplesS := Samples(samples(s))
return &SeriesEntry{
Lset: lset,
SampleIteratorFn: func() chunkenc.Iterator {
return NewListSeriesIterator(samples(s))
SampleIteratorFn: func(it chunkenc.Iterator) chunkenc.Iterator {
if lsi, ok := it.(*listSeriesIterator); ok {
lsi.Reset(samplesS)
return lsi
}
return NewListSeriesIterator(samplesS)
},
}
}
@ -56,11 +61,21 @@ func NewListSeries(lset labels.Labels, s []tsdbutil.Sample) *SeriesEntry {
func NewListChunkSeriesFromSamples(lset labels.Labels, samples ...[]tsdbutil.Sample) *ChunkSeriesEntry {
return &ChunkSeriesEntry{
Lset: lset,
ChunkIteratorFn: func() chunks.Iterator {
chks := make([]chunks.Meta, 0, len(samples))
ChunkIteratorFn: func(it chunks.Iterator) chunks.Iterator {
lcsi, existing := it.(*listChunkSeriesIterator)
var chks []chunks.Meta
if existing {
chks = lcsi.chks[:0]
} else {
chks = make([]chunks.Meta, 0, len(samples))
}
for _, s := range samples {
chks = append(chks, tsdbutil.ChunkFromSamples(s))
}
if existing {
lcsi.Reset(chks...)
return lcsi
}
return NewListChunkSeriesIterator(chks...)
},
}
@ -87,6 +102,11 @@ func NewListSeriesIterator(samples Samples) chunkenc.Iterator {
return &listSeriesIterator{samples: samples, idx: -1}
}
func (it *listSeriesIterator) Reset(samples Samples) {
it.samples = samples
it.idx = -1
}
func (it *listSeriesIterator) At() (int64, float64) {
s := it.samples.Get(it.idx)
return s.T(), s.V()
@ -150,6 +170,11 @@ func NewListChunkSeriesIterator(chks ...chunks.Meta) chunks.Iterator {
return &listChunkSeriesIterator{chks: chks, idx: -1}
}
func (it *listChunkSeriesIterator) Reset(chks ...chunks.Meta) {
it.chks = chks
it.idx = -1
}
func (it *listChunkSeriesIterator) At() chunks.Meta {
return it.chks[it.idx]
}
@ -164,6 +189,7 @@ func (it *listChunkSeriesIterator) Err() error { return nil }
type chunkSetToSeriesSet struct {
ChunkSeriesSet
iter chunks.Iterator
chkIterErr error
sameSeriesChunks []Series
}
@ -178,18 +204,18 @@ func (c *chunkSetToSeriesSet) Next() bool {
return false
}
iter := c.ChunkSeriesSet.At().Iterator()
c.sameSeriesChunks = c.sameSeriesChunks[:0]
c.iter = c.ChunkSeriesSet.At().Iterator(c.iter)
c.sameSeriesChunks = nil
for iter.Next() {
for c.iter.Next() {
c.sameSeriesChunks = append(
c.sameSeriesChunks,
newChunkToSeriesDecoder(c.ChunkSeriesSet.At().Labels(), iter.At()),
newChunkToSeriesDecoder(c.ChunkSeriesSet.At().Labels(), c.iter.At()),
)
}
if iter.Err() != nil {
c.chkIterErr = iter.Err()
if c.iter.Err() != nil {
c.chkIterErr = c.iter.Err()
return false
}
return true
@ -210,9 +236,9 @@ func (c *chunkSetToSeriesSet) Err() error {
func newChunkToSeriesDecoder(labels labels.Labels, chk chunks.Meta) Series {
return &SeriesEntry{
Lset: labels,
SampleIteratorFn: func() chunkenc.Iterator {
SampleIteratorFn: func(it chunkenc.Iterator) chunkenc.Iterator {
// TODO(bwplotka): Can we provide any chunkenc buffer?
return chk.Chunk.Iterator(nil)
return chk.Chunk.Iterator(it)
},
}
}
@ -252,7 +278,7 @@ func NewSeriesToChunkEncoder(series Series) ChunkSeries {
return &seriesToChunkEncoder{series}
}
func (s *seriesToChunkEncoder) Iterator() chunks.Iterator {
func (s *seriesToChunkEncoder) Iterator(it chunks.Iterator) chunks.Iterator {
var (
chk chunkenc.Chunk
app chunkenc.Appender
@ -261,9 +287,14 @@ func (s *seriesToChunkEncoder) Iterator() chunks.Iterator {
mint := int64(math.MaxInt64)
maxt := int64(math.MinInt64)
chks := []chunks.Meta{}
var chks []chunks.Meta
lcsi, existing := it.(*listChunkSeriesIterator)
if existing {
chks = lcsi.chks[:0]
}
i := 0
seriesIter := s.Series.Iterator()
seriesIter := s.Series.Iterator(nil)
lastType := chunkenc.ValNone
for typ := seriesIter.Next(); typ != chunkenc.ValNone; typ = seriesIter.Next() {
if typ != lastType || i >= seriesToChunkEncoderSplit {
@ -290,9 +321,10 @@ func (s *seriesToChunkEncoder) Iterator() chunks.Iterator {
lastType = typ
var (
t int64
v float64
h *histogram.Histogram
t int64
v float64
h *histogram.Histogram
fh *histogram.FloatHistogram
)
switch typ {
case chunkenc.ValFloat:
@ -301,6 +333,9 @@ func (s *seriesToChunkEncoder) Iterator() chunks.Iterator {
case chunkenc.ValHistogram:
t, h = seriesIter.AtHistogram()
app.AppendHistogram(t, h)
case chunkenc.ValFloatHistogram:
t, fh = seriesIter.AtFloatHistogram()
app.AppendFloatHistogram(t, fh)
default:
return errChunksIterator{err: fmt.Errorf("unknown sample type %s", typ.String())}
}
@ -323,6 +358,10 @@ func (s *seriesToChunkEncoder) Iterator() chunks.Iterator {
})
}
if existing {
lcsi.Reset(chks...)
return lcsi
}
return NewListChunkSeriesIterator(chks...)
}
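Round-tripping a series through the encoder, sketched; s is an assumed storage.Series in scope, and the chunk iterator can be recycled the same way as sample iterators:

cs := NewSeriesToChunkEncoder(s)
var cit chunks.Iterator
cit = cs.Iterator(cit) // may reset a previous listChunkSeriesIterator
for cit.Next() {
	meta := cit.At() // chunks.Meta with MinTime, MaxTime and the encoded chunk
	_ = meta
}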
@ -362,7 +401,6 @@ func ExpandSamples(iter chunkenc.Iterator, newSampleFn func(t int64, v float64,
case chunkenc.ValFloatHistogram:
t, fh := iter.AtFloatHistogram()
result = append(result, newSampleFn(t, 0, nil, fh))
}
}
}

View file

@ -18,7 +18,9 @@ import (
"github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/tsdb/chunkenc"
"github.com/prometheus/prometheus/tsdb/tsdbutil"
)
func TestListSeriesIterator(t *testing.T) {
@ -65,3 +67,61 @@ func TestListSeriesIterator(t *testing.T) {
// And we don't go back. (This exposes issue #10027.)
require.Equal(t, chunkenc.ValNone, it.Seek(2))
}
// TestChunkSeriesSetToSeriesSet tests the property of SeriesSet that says
// returned series should be iterable even after Next is called.
func TestChunkSeriesSetToSeriesSet(t *testing.T) {
series := []struct {
lbs labels.Labels
samples []tsdbutil.Sample
}{
{
lbs: labels.Labels{
{Name: "__name__", Value: "up"},
{Name: "instance", Value: "localhost:8080"},
},
samples: []tsdbutil.Sample{
sample{t: 1, v: 1},
sample{t: 2, v: 2},
sample{t: 3, v: 3},
sample{t: 4, v: 4},
},
}, {
lbs: labels.Labels{
{Name: "__name__", Value: "up"},
{Name: "instance", Value: "localhost:8081"},
},
samples: []tsdbutil.Sample{
sample{t: 1, v: 2},
sample{t: 2, v: 3},
sample{t: 3, v: 4},
sample{t: 4, v: 5},
sample{t: 5, v: 6},
sample{t: 6, v: 7},
},
},
}
var chunkSeries []ChunkSeries
for _, s := range series {
chunkSeries = append(chunkSeries, NewListChunkSeriesFromSamples(s.lbs, s.samples))
}
css := NewMockChunkSeriesSet(chunkSeries...)
ss := NewSeriesSetFromChunkSeriesSet(css)
var ssSlice []Series
for ss.Next() {
ssSlice = append(ssSlice, ss.At())
}
require.Len(t, ssSlice, 2)
var iter chunkenc.Iterator
for i, s := range ssSlice {
require.EqualValues(t, series[i].lbs, s.Labels())
iter = s.Iterator(iter)
j := 0
for iter.Next() == chunkenc.ValFloat {
ts, v := iter.At()
require.EqualValues(t, series[i].samples[j], sample{t: ts, v: v})
j++
}
}
}

Some files were not shown because too many files have changed in this diff.