Merge branch 'main' into sparsehistogram

beorn7 2022-08-10 17:54:37 +02:00
commit c9fd3c235d
90 changed files with 2201 additions and 571 deletions

View file

@@ -10,7 +10,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v3
-      - uses: bufbuild/buf-setup-action@v1.6.0
+      - uses: bufbuild/buf-setup-action@v1.7.0
       - uses: bufbuild/buf-lint-action@v1
         with:
           input: 'prompb'

View file

@@ -9,7 +9,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
      - uses: actions/checkout@v3
-     - uses: bufbuild/buf-setup-action@v1.6.0
+     - uses: bufbuild/buf-setup-action@v1.7.0
      - uses: bufbuild/buf-lint-action@v1
        with:
          input: 'prompb'

View file

@@ -17,7 +17,8 @@ jobs:
         uses: actions/setup-node@v3
         with:
           node-version-file: "web/ui/.nvmrc"
+          registry-url: "https://registry.npmjs.org"
-      - uses: actions/cache@v3.0.4
+      - uses: actions/cache@v3.0.5
         with:
           path: ~/.npm
           key: ${{ runner.os }}-node-${{ hashFiles('**/package-lock.json') }}

View file

@@ -1,6 +1,11 @@
 # Changelog

-## 2.37.0-rc.0 / 2022-07-05
+## 2.37.0 / 2022-07-14
+
+This release is a LTS (Long-Term Support) release of Prometheus and will
+receive security, documentation and bugfix patches for at least 6 months.
+Please read more about our LTS release cycle at
+<https://prometheus.io/docs/introduction/release-cycle/>.

 Following data loss by users due to lack of unified buffer cache in OpenBSD, we
 will no longer release Prometheus upstream for OpenBSD until a proper solution is
@@ -11,6 +16,7 @@ found. #8799
 * [ENHANCEMENT] PromQL: Optimise creation of signature with/without labels. #10667
 * [ENHANCEMENT] TSDB: Memory optimizations. #10873 #10874
 * [ENHANCEMENT] TSDB: Reduce sleep time when reading WAL. #10859 #10878
+* [ENHANCEMENT] OAuth2: Add appropriate timeouts and User-Agent header. #11020
 * [BUGFIX] Alerting: Fix Alertmanager targets not being updated when alerts were queued. #10948
 * [BUGFIX] Hetzner SD: Make authentication files relative to Prometheus config file. #10813
 * [BUGFIX] Promtool: Fix `promtool check config` not erroring properly on failures. #10952

View file

@@ -42,7 +42,7 @@ Release cadence of first pre-releases being cut is 6 weeks.
 | v2.35     | 2022-04-06 | Augustin Husson (GitHub: @nexucis)     |
 | v2.36     | 2022-05-18 | Matthias Loibl (GitHub: @metalmatze)   |
 | v2.37 LTS | 2022-06-29 | Julien Pivotto (GitHub: @roidelapluie) |
-| v2.38     | 2022-08-10 | **searching for volunteer**            |
+| v2.38     | 2022-08-10 | Julius Volz (GitHub: @juliusv)         |
 | v2.39     | 2022-09-21 | **searching for volunteer**            |

 If you are interested in volunteering please create a pull request against the [prometheus/prometheus](https://github.com/prometheus/prometheus) repository and propose yourself for the release series of your choice.
@@ -139,6 +139,7 @@ For release candidates still update `CHANGELOG.md`, but when you cut the final r
 Entries in the `CHANGELOG.md` are meant to be in this order:

+* `[SECURITY]` - A bugfix that specifically fixes a security issue.
 * `[CHANGE]`
 * `[FEATURE]`
 * `[ENHANCEMENT]`

View file

@@ -1 +1 @@
-2.37.0-rc.0
+2.37.0

View file

@@ -59,6 +59,7 @@ import (
     "github.com/prometheus/prometheus/model/exemplar"
     "github.com/prometheus/prometheus/model/histogram"
     "github.com/prometheus/prometheus/model/labels"
+    "github.com/prometheus/prometheus/model/metadata"
     "github.com/prometheus/prometheus/model/relabel"
     "github.com/prometheus/prometheus/notifier"
     _ "github.com/prometheus/prometheus/plugins" // Register plugins.
@@ -179,7 +180,7 @@ func (c *flagConfig) setFeatureListOptions(logger log.Logger) error {
             level.Info(logger).Log("msg", "Experimental memory snapshot on shutdown enabled")
         case "extra-scrape-metrics":
             c.scrape.ExtraMetrics = true
-            level.Info(logger).Log("msg", "Experimental additional scrape metrics")
+            level.Info(logger).Log("msg", "Experimental additional scrape metrics enabled")
         case "new-service-discovery-manager":
             c.enableNewSDManager = true
             level.Info(logger).Log("msg", "Experimental service discovery manager")
@@ -192,6 +193,9 @@ func (c *flagConfig) setFeatureListOptions(logger log.Logger) error {
         case "auto-gomaxprocs":
             c.enableAutoGOMAXPROCS = true
             level.Info(logger).Log("msg", "Automatically set GOMAXPROCS to match Linux container CPU quota")
+        case "no-default-scrape-port":
+            c.scrape.NoDefaultPort = true
+            level.Info(logger).Log("msg", "No default port will be appended to scrape targets' addresses.")
         case "":
             continue
         case "promql-at-modifier", "promql-negative-offset":
@@ -391,7 +395,7 @@ func main() {
     a.Flag("scrape.discovery-reload-interval", "Interval used by scrape manager to throttle target groups updates.").
         Hidden().Default("5s").SetValue(&cfg.scrape.DiscoveryReloadInterval)

-    a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: agent, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-at-modifier, promql-negative-offset, promql-per-step-stats, remote-write-receiver (DEPRECATED), extra-scrape-metrics, new-service-discovery-manager, auto-gomaxprocs. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details.").
+    a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: agent, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-at-modifier, promql-negative-offset, promql-per-step-stats, remote-write-receiver (DEPRECATED), extra-scrape-metrics, new-service-discovery-manager, auto-gomaxprocs, no-default-scrape-port. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details.").
         Default("").StringsVar(&cfg.featureList)

     promlogflag.AddFlags(a, &cfg.promlogConfig)
@@ -1408,6 +1412,10 @@ func (n notReadyAppender) AppendHistogram(ref storage.SeriesRef, l labels.Labels
     return 0, tsdb.ErrNotReady
 }

+func (n notReadyAppender) UpdateMetadata(ref storage.SeriesRef, l labels.Labels, m metadata.Metadata) (storage.SeriesRef, error) {
+    return 0, tsdb.ErrNotReady
+}
+
 func (n notReadyAppender) Commit() error   { return tsdb.ErrNotReady }
 func (n notReadyAppender) Rollback() error { return tsdb.ErrNotReady }

View file

@@ -38,6 +38,8 @@ import (
     "github.com/prometheus/prometheus/rules"
 )

+const startupTime = 10 * time.Second
+
 var (
     promPath   = os.Args[0]
     promConfig = filepath.Join("..", "..", "documentation", "examples", "prometheus.yml")
@@ -226,7 +228,7 @@ func TestWALSegmentSizeBounds(t *testing.T) {
         select {
         case err := <-done:
             t.Errorf("prometheus should be still running: %v", err)
-        case <-time.After(5 * time.Second):
+        case <-time.After(startupTime):
             prom.Process.Kill()
             <-done
         }
@@ -272,7 +274,7 @@ func TestMaxBlockChunkSegmentSizeBounds(t *testing.T) {
         select {
         case err := <-done:
             t.Errorf("prometheus should be still running: %v", err)
-        case <-time.After(5 * time.Second):
+        case <-time.After(startupTime):
             prom.Process.Kill()
             <-done
         }
@@ -366,7 +368,7 @@ func TestAgentSuccessfulStartup(t *testing.T) {
     case err := <-done:
         t.Logf("prometheus agent should be still running: %v", err)
         actualExitStatus = prom.ProcessState.ExitCode()
-    case <-time.After(5 * time.Second):
+    case <-time.After(startupTime):
         prom.Process.Kill()
     }
     require.Equal(t, 0, actualExitStatus)
@@ -387,7 +389,7 @@ func TestAgentFailedStartupWithServerFlag(t *testing.T) {
     case err := <-done:
         t.Logf("prometheus agent should not be running: %v", err)
         actualExitStatus = prom.ProcessState.ExitCode()
-    case <-time.After(5 * time.Second):
+    case <-time.After(startupTime):
         prom.Process.Kill()
     }
@@ -411,7 +413,7 @@ func TestAgentFailedStartupWithInvalidConfig(t *testing.T) {
     case err := <-done:
         t.Logf("prometheus agent should not be running: %v", err)
         actualExitStatus = prom.ProcessState.ExitCode()
-    case <-time.After(5 * time.Second):
+    case <-time.After(startupTime):
         prom.Process.Kill()
     }
     require.Equal(t, 2, actualExitStatus)
@@ -462,7 +464,7 @@ func TestModeSpecificFlags(t *testing.T) {
         select {
         case err := <-done:
             t.Errorf("prometheus should be still running: %v", err)
-        case <-time.After(5 * time.Second):
+        case <-time.After(startupTime):
             prom.Process.Kill()
             <-done
         }

View file

@@ -211,7 +211,7 @@ func main() {
         "A list of one or more files containing recording rules to be backfilled. All recording rules listed in the files will be backfilled. Alerting rules are not evaluated.",
     ).Required().ExistingFiles()

-    featureList := app.Flag("enable-feature", "Comma separated feature names to enable (only PromQL related). See https://prometheus.io/docs/prometheus/latest/feature_flags/ for the options and more details.").Default("").Strings()
+    featureList := app.Flag("enable-feature", "Comma separated feature names to enable (only PromQL related and no-default-scrape-port). See https://prometheus.io/docs/prometheus/latest/feature_flags/ for the options and more details.").Default("").Strings()

     parsedCmd := kingpin.MustParse(app.Parse(os.Args[1:]))
@@ -223,10 +223,13 @@ func main() {
         p = &promqlPrinter{}
     }

+    var noDefaultScrapePort bool
     for _, f := range *featureList {
         opts := strings.Split(f, ",")
         for _, o := range opts {
             switch o {
+            case "no-default-scrape-port":
+                noDefaultScrapePort = true
             case "":
                 continue
             case "promql-at-modifier", "promql-negative-offset":
@@ -239,7 +242,7 @@ func main() {
     switch parsedCmd {
     case sdCheckCmd.FullCommand():
-        os.Exit(CheckSD(*sdConfigFile, *sdJobName, *sdTimeout))
+        os.Exit(CheckSD(*sdConfigFile, *sdJobName, *sdTimeout, noDefaultScrapePort))
     case checkConfigCmd.FullCommand():
         os.Exit(CheckConfig(*agentMode, *checkConfigSyntaxOnly, newLintConfig(*checkConfigLint, *checkConfigLintFatal), *configFiles...))
@@ -1217,7 +1220,7 @@ func checkTargetGroupsForAlertmanager(targetGroups []*targetgroup.Group, amcfg *
 func checkTargetGroupsForScrapeConfig(targetGroups []*targetgroup.Group, scfg *config.ScrapeConfig) error {
     for _, tg := range targetGroups {
-        _, failures := scrape.TargetsFromGroup(tg, scfg)
+        _, failures := scrape.TargetsFromGroup(tg, scfg, false)
         if len(failures) > 0 {
             first := failures[0]
             return first

View file

@@ -34,7 +34,7 @@ import (
 const maxSamplesInMemory = 5000

 type queryRangeAPI interface {
-    QueryRange(ctx context.Context, query string, r v1.Range) (model.Value, v1.Warnings, error)
+    QueryRange(ctx context.Context, query string, r v1.Range, opts ...v1.Option) (model.Value, v1.Warnings, error)
 }

 type ruleImporter struct {

View file

@@ -35,7 +35,7 @@ type mockQueryRangeAPI struct {
     samples model.Matrix
 }

-func (mockAPI mockQueryRangeAPI) QueryRange(ctx context.Context, query string, r v1.Range) (model.Value, v1.Warnings, error) {
+func (mockAPI mockQueryRangeAPI) QueryRange(ctx context.Context, query string, r v1.Range, opts ...v1.Option) (model.Value, v1.Warnings, error) {
     return mockAPI.samples, v1.Warnings{}, nil
 }

View file

@@ -37,7 +37,7 @@ type sdCheckResult struct {
 }

 // CheckSD performs service discovery for the given job name and reports the results.
-func CheckSD(sdConfigFiles, sdJobName string, sdTimeout time.Duration) int {
+func CheckSD(sdConfigFiles, sdJobName string, sdTimeout time.Duration, noDefaultScrapePort bool) int {
     logger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr))

     cfg, err := config.LoadFile(sdConfigFiles, false, false, logger)
@@ -94,7 +94,7 @@ outerLoop:
     }

     results := []sdCheckResult{}
     for _, tgs := range sdCheckResults {
-        results = append(results, getSDCheckResult(tgs, scrapeConfig)...)
+        results = append(results, getSDCheckResult(tgs, scrapeConfig, noDefaultScrapePort)...)
     }

     res, err := json.MarshalIndent(results, "", " ")
@@ -107,7 +107,7 @@ outerLoop:
     return successExitCode
 }

-func getSDCheckResult(targetGroups []*targetgroup.Group, scrapeConfig *config.ScrapeConfig) []sdCheckResult {
+func getSDCheckResult(targetGroups []*targetgroup.Group, scrapeConfig *config.ScrapeConfig, noDefaultScrapePort bool) []sdCheckResult {
     sdCheckResults := []sdCheckResult{}
     for _, targetGroup := range targetGroups {
         for _, target := range targetGroup.Targets {
@@ -124,7 +124,7 @@ func getSDCheckResult(targetGroups []*targetgroup.Group, scrapeConfig *config.Sc
             }

             targetLabels := labels.New(labelSlice...)
-            res, orig, err := scrape.PopulateLabels(targetLabels, scrapeConfig)
+            res, orig, err := scrape.PopulateLabels(targetLabels, scrapeConfig, noDefaultScrapePort)
             result := sdCheckResult{
                 DiscoveredLabels: orig,
                 Labels:           res,

View file

@@ -69,5 +69,5 @@ func TestSDCheckResult(t *testing.T) {
         },
     }

-    require.Equal(t, expectedSDCheckResult, getSDCheckResult(targetGroups, scrapeConfig))
+    require.Equal(t, expectedSDCheckResult, getSDCheckResult(targetGroups, scrapeConfig, true))
 }

View file

@@ -40,6 +40,8 @@ const (
     dnsSrvRecordPrefix      = model.MetaLabelPrefix + "dns_srv_record_"
     dnsSrvRecordTargetLabel = dnsSrvRecordPrefix + "target"
     dnsSrvRecordPortLabel   = dnsSrvRecordPrefix + "port"
+    dnsMxRecordPrefix       = model.MetaLabelPrefix + "dns_mx_record_"
+    dnsMxRecordTargetLabel  = dnsMxRecordPrefix + "target"

     // Constants for instrumentation.
     namespace = "prometheus"
@@ -100,7 +102,7 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
     }
     switch strings.ToUpper(c.Type) {
     case "SRV":
-    case "A", "AAAA":
+    case "A", "AAAA", "MX":
         if c.Port == 0 {
             return errors.New("a port is required in DNS-SD configs for all record types except SRV")
         }
@@ -136,6 +138,8 @@ func NewDiscovery(conf SDConfig, logger log.Logger) *Discovery {
         qtype = dns.TypeAAAA
     case "SRV":
         qtype = dns.TypeSRV
+    case "MX":
+        qtype = dns.TypeMX
     }
     d := &Discovery{
         names: conf.Names,
@@ -195,7 +199,7 @@ func (d *Discovery) refreshOne(ctx context.Context, name string, ch chan<- *targ
     }

     for _, record := range response.Answer {
-        var target, dnsSrvRecordTarget, dnsSrvRecordPort model.LabelValue
+        var target, dnsSrvRecordTarget, dnsSrvRecordPort, dnsMxRecordTarget model.LabelValue

         switch addr := record.(type) {
         case *dns.SRV:
@@ -206,6 +210,13 @@ func (d *Discovery) refreshOne(ctx context.Context, name string, ch chan<- *targ
             addr.Target = strings.TrimRight(addr.Target, ".")

             target = hostPort(addr.Target, int(addr.Port))
+        case *dns.MX:
+            dnsMxRecordTarget = model.LabelValue(addr.Mx)
+
+            // Remove the final dot from rooted DNS names to make them look more usual.
+            addr.Mx = strings.TrimRight(addr.Mx, ".")
+
+            target = hostPort(addr.Mx, d.port)
         case *dns.A:
             target = hostPort(addr.A.String(), d.port)
         case *dns.AAAA:
@@ -222,6 +233,7 @@ func (d *Discovery) refreshOne(ctx context.Context, name string, ch chan<- *targ
             dnsNameLabel:            model.LabelValue(name),
             dnsSrvRecordTargetLabel: dnsSrvRecordTarget,
             dnsSrvRecordPortLabel:   dnsSrvRecordPort,
+            dnsMxRecordTargetLabel:  dnsMxRecordTarget,
         })
     }

View file

@@ -80,6 +80,7 @@ func TestDNS(t *testing.T) {
                         "__meta_dns_name":              "web.example.com.",
                         "__meta_dns_srv_record_target": "",
                         "__meta_dns_srv_record_port":   "",
+                        "__meta_dns_mx_record_target":  "",
                     },
                 },
             },
@@ -110,6 +111,7 @@ func TestDNS(t *testing.T) {
                         "__meta_dns_name":              "web.example.com.",
                         "__meta_dns_srv_record_target": "",
                         "__meta_dns_srv_record_port":   "",
+                        "__meta_dns_mx_record_target":  "",
                     },
                 },
             },
@@ -140,12 +142,14 @@ func TestDNS(t *testing.T) {
                         "__meta_dns_name":              "_mysql._tcp.db.example.com.",
                         "__meta_dns_srv_record_target": "db1.example.com.",
                         "__meta_dns_srv_record_port":   "3306",
+                        "__meta_dns_mx_record_target":  "",
                     },
                     {
                         "__address__":                  "db2.example.com:3306",
                         "__meta_dns_name":              "_mysql._tcp.db.example.com.",
                         "__meta_dns_srv_record_target": "db2.example.com.",
                         "__meta_dns_srv_record_port":   "3306",
+                        "__meta_dns_mx_record_target":  "",
                     },
                 },
             },
@@ -175,6 +179,7 @@ func TestDNS(t *testing.T) {
                         "__meta_dns_name":              "_mysql._tcp.db.example.com.",
                         "__meta_dns_srv_record_target": "db1.example.com.",
                         "__meta_dns_srv_record_port":   "3306",
+                        "__meta_dns_mx_record_target":  "",
                     },
                 },
             },
@@ -195,6 +200,45 @@ func TestDNS(t *testing.T) {
                 },
             },
         },
+        {
+            name: "MX record query",
+            config: SDConfig{
+                Names:           []string{"example.com."},
+                Type:            "MX",
+                Port:            25,
+                RefreshInterval: model.Duration(time.Minute),
+            },
+            lookup: func(name string, qtype uint16, logger log.Logger) (*dns.Msg, error) {
+                return &dns.Msg{
+                        Answer: []dns.RR{
+                            &dns.MX{Preference: 0, Mx: "smtp1.example.com."},
+                            &dns.MX{Preference: 10, Mx: "smtp2.example.com."},
+                        },
+                    },
+                    nil
+            },
+            expected: []*targetgroup.Group{
+                {
+                    Source: "example.com.",
+                    Targets: []model.LabelSet{
+                        {
+                            "__address__":                  "smtp1.example.com:25",
+                            "__meta_dns_name":              "example.com.",
+                            "__meta_dns_srv_record_target": "",
+                            "__meta_dns_srv_record_port":   "",
+                            "__meta_dns_mx_record_target":  "smtp1.example.com.",
+                        },
+                        {
+                            "__address__":                  "smtp2.example.com:25",
+                            "__meta_dns_name":              "example.com.",
+                            "__meta_dns_srv_record_target": "",
+                            "__meta_dns_srv_record_port":   "",
+                            "__meta_dns_mx_record_target":  "smtp2.example.com.",
+                        },
+                    },
+                },
+            },
+        },
     }

     for _, tc := range testCases {

View file

@@ -322,6 +322,7 @@ func (e *Endpoints) buildEndpoints(eps *apiv1.Endpoints) *targetgroup.Group {
                 ports := strconv.FormatUint(uint64(port.Port), 10)

                 target[podContainerNameLabel] = lv(c.Name)
+                target[podContainerImageLabel] = lv(c.Image)
                 target[podContainerPortNameLabel] = lv(cport.Name)
                 target[podContainerPortNumberLabel] = lv(ports)
                 target[podContainerPortProtocolLabel] = lv(string(port.Protocol))
@@ -380,6 +381,7 @@ func (e *Endpoints) buildEndpoints(eps *apiv1.Endpoints) *targetgroup.Group {
                 target := model.LabelSet{
                     model.AddressLabel:            lv(a),
                     podContainerNameLabel:         lv(c.Name),
+                    podContainerImageLabel:        lv(c.Image),
                     podContainerPortNameLabel:     lv(cport.Name),
                     podContainerPortNumberLabel:   lv(ports),
                     podContainerPortProtocolLabel: lv(string(cport.Protocol)),

View file

@@ -128,7 +128,8 @@ func TestEndpointsDiscoveryAdd(t *testing.T) {
             NodeName: "testnode",
             Containers: []v1.Container{
                 {
                     Name:  "c1",
+                    Image: "c1:latest",
                     Ports: []v1.ContainerPort{
                         {
                             Name: "mainport",
@@ -138,7 +139,8 @@ func TestEndpointsDiscoveryAdd(t *testing.T) {
                     },
                 },
                 {
                     Name:  "c2",
+                    Image: "c2:latest",
                     Ports: []v1.ContainerPort{
                         {
                             Name: "sideport",
@@ -206,6 +208,7 @@ func TestEndpointsDiscoveryAdd(t *testing.T) {
                 "__meta_kubernetes_pod_node_name":               "testnode",
                 "__meta_kubernetes_pod_host_ip":                 "2.3.4.5",
                 "__meta_kubernetes_pod_container_name":          "c1",
+                "__meta_kubernetes_pod_container_image":         "c1:latest",
                 "__meta_kubernetes_pod_container_port_name":     "mainport",
                 "__meta_kubernetes_pod_container_port_number":   "9000",
                 "__meta_kubernetes_pod_container_port_protocol": "TCP",
@@ -220,6 +223,7 @@ func TestEndpointsDiscoveryAdd(t *testing.T) {
                 "__meta_kubernetes_pod_node_name":               "testnode",
                 "__meta_kubernetes_pod_host_ip":                 "2.3.4.5",
                 "__meta_kubernetes_pod_container_name":          "c2",
+                "__meta_kubernetes_pod_container_image":         "c2:latest",
                 "__meta_kubernetes_pod_container_port_name":     "sideport",
                 "__meta_kubernetes_pod_container_port_number":   "9001",
                 "__meta_kubernetes_pod_container_port_protocol": "TCP",
@@ -649,7 +653,8 @@ func TestEndpointsDiscoveryNamespaces(t *testing.T) {
             NodeName: "testnode",
             Containers: []v1.Container{
                 {
                     Name:  "c1",
+                    Image: "c1:latest",
                     Ports: []v1.ContainerPort{
                         {
                             Name: "mainport",
@@ -720,6 +725,7 @@ func TestEndpointsDiscoveryNamespaces(t *testing.T) {
                 "__meta_kubernetes_pod_node_name":               "testnode",
                 "__meta_kubernetes_pod_host_ip":                 "2.3.4.5",
                 "__meta_kubernetes_pod_container_name":          "c1",
+                "__meta_kubernetes_pod_container_image":         "c1:latest",
                 "__meta_kubernetes_pod_container_port_name":     "mainport",
                 "__meta_kubernetes_pod_container_port_number":   "9000",
                 "__meta_kubernetes_pod_container_port_protocol": "TCP",
@@ -753,7 +759,8 @@ func TestEndpointsDiscoveryOwnNamespace(t *testing.T) {
             NodeName: "testnode",
             Containers: []v1.Container{
                 {
                     Name:  "p1",
+                    Image: "p1:latest",
                     Ports: []v1.ContainerPort{
                         {
                             Name: "mainport",

View file

@@ -350,6 +350,7 @@ func (e *EndpointSlice) buildEndpointSlice(eps endpointSliceAdaptor) *targetgrou
                 ports := strconv.FormatUint(uint64(*port.port()), 10)

                 target[podContainerNameLabel] = lv(c.Name)
+                target[podContainerImageLabel] = lv(c.Image)
                 target[podContainerPortNameLabel] = lv(cport.Name)
                 target[podContainerPortNumberLabel] = lv(ports)
                 target[podContainerPortProtocolLabel] = lv(string(cport.Protocol))
@@ -398,6 +399,7 @@ func (e *EndpointSlice) buildEndpointSlice(eps endpointSliceAdaptor) *targetgrou
                 target := model.LabelSet{
                     model.AddressLabel:            lv(a),
                     podContainerNameLabel:         lv(c.Name),
+                    podContainerImageLabel:        lv(c.Image),
                     podContainerPortNameLabel:     lv(cport.Name),
                     podContainerPortNumberLabel:   lv(ports),
                     podContainerPortProtocolLabel: lv(string(cport.Protocol)),

View file

@@ -234,7 +234,8 @@ func TestEndpointSliceDiscoveryAdd(t *testing.T) {
             NodeName: "testnode",
             Containers: []corev1.Container{
                 {
                     Name:  "c1",
+                    Image: "c1:latest",
                     Ports: []corev1.ContainerPort{
                         {
                             Name: "mainport",
@@ -244,7 +245,8 @@ func TestEndpointSliceDiscoveryAdd(t *testing.T) {
                     },
                 },
                 {
                     Name:  "c2",
+                    Image: "c2:latest",
                     Ports: []corev1.ContainerPort{
                         {
                             Name: "sideport",
@@ -307,6 +309,7 @@ func TestEndpointSliceDiscoveryAdd(t *testing.T) {
                 "__meta_kubernetes_endpointslice_port_name":     "testport",
                 "__meta_kubernetes_endpointslice_port_protocol": "TCP",
                 "__meta_kubernetes_pod_container_name":          "c1",
+                "__meta_kubernetes_pod_container_image":         "c1:latest",
                 "__meta_kubernetes_pod_container_port_name":     "mainport",
                 "__meta_kubernetes_pod_container_port_number":   "9000",
                 "__meta_kubernetes_pod_container_port_protocol": "TCP",
@@ -321,6 +324,7 @@ func TestEndpointSliceDiscoveryAdd(t *testing.T) {
             {
                 "__address__":                                   "1.2.3.4:9001",
                 "__meta_kubernetes_pod_container_name":          "c2",
+                "__meta_kubernetes_pod_container_image":         "c2:latest",
                 "__meta_kubernetes_pod_container_port_name":     "sideport",
                 "__meta_kubernetes_pod_container_port_number":   "9001",
                 "__meta_kubernetes_pod_container_port_protocol": "TCP",
@@ -878,7 +882,8 @@ func TestEndpointSliceDiscoveryNamespaces(t *testing.T) {
             NodeName: "testnode",
             Containers: []corev1.Container{
                 {
                     Name:  "c1",
+                    Image: "c1:latest",
                     Ports: []corev1.ContainerPort{
                         {
                             Name: "mainport",
@@ -953,6 +958,7 @@ func TestEndpointSliceDiscoveryNamespaces(t *testing.T) {
                 "__meta_kubernetes_endpointslice_port_name":     "testport",
                 "__meta_kubernetes_endpointslice_port_protocol": "TCP",
                 "__meta_kubernetes_pod_container_name":          "c1",
+                "__meta_kubernetes_pod_container_image":         "c1:latest",
                 "__meta_kubernetes_pod_container_port_name":     "mainport",
                 "__meta_kubernetes_pod_container_port_number":   "9000",
                 "__meta_kubernetes_pod_container_port_protocol": "TCP",
@@ -993,7 +999,8 @@ func TestEndpointSliceDiscoveryOwnNamespace(t *testing.T) {
             NodeName: "testnode",
             Containers: []corev1.Container{
                 {
                     Name:  "p1",
+                    Image: "p1:latest",
                     Ports: []corev1.ContainerPort{
                         {
                             Name: "mainport",

View file

@@ -177,6 +177,7 @@ const (
     podNameLabel                  = metaLabelPrefix + "pod_name"
     podIPLabel                    = metaLabelPrefix + "pod_ip"
     podContainerNameLabel         = metaLabelPrefix + "pod_container_name"
+    podContainerImageLabel        = metaLabelPrefix + "pod_container_image"
     podContainerPortNameLabel     = metaLabelPrefix + "pod_container_port_name"
     podContainerPortNumberLabel   = metaLabelPrefix + "pod_container_port_number"
     podContainerPortProtocolLabel = metaLabelPrefix + "pod_container_port_protocol"
@@ -266,9 +267,10 @@ func (p *Pod) buildPod(pod *apiv1.Pod) *targetgroup.Group {
             // We don't have a port so we just set the address label to the pod IP.
             // The user has to add a port manually.
             tg.Targets = append(tg.Targets, model.LabelSet{
                 model.AddressLabel:     lv(pod.Status.PodIP),
                 podContainerNameLabel:  lv(c.Name),
+                podContainerImageLabel: lv(c.Image),
                 podContainerIsInit:     lv(strconv.FormatBool(isInit)),
             })
             continue
         }
@@ -280,6 +282,7 @@ func (p *Pod) buildPod(pod *apiv1.Pod) *targetgroup.Group {
             tg.Targets = append(tg.Targets, model.LabelSet{
                 model.AddressLabel:            lv(addr),
                 podContainerNameLabel:         lv(c.Name),
+                podContainerImageLabel:        lv(c.Image),
                 podContainerPortNumberLabel:   lv(ports),
                 podContainerPortNameLabel:     lv(port.Name),
                 podContainerPortProtocolLabel: lv(string(port.Protocol)),

View file

@@ -50,7 +50,8 @@ func makeMultiPortPods() *v1.Pod {
             NodeName: "testnode",
             Containers: []v1.Container{
                 {
                     Name:  "testcontainer0",
+                    Image: "testcontainer0:latest",
                     Ports: []v1.ContainerPort{
                         {
                             Name: "testport0",
@@ -65,7 +66,8 @@ func makeMultiPortPods() *v1.Pod {
                     },
                 },
                 {
                     Name:  "testcontainer1",
+                    Image: "testcontainer1:latest",
                 },
             },
         },
@@ -94,7 +96,8 @@ func makePods() *v1.Pod {
             NodeName: "testnode",
             Containers: []v1.Container{
                 {
                     Name:  "testcontainer",
+                    Image: "testcontainer:latest",
                     Ports: []v1.ContainerPort{
                         {
                             Name: "testport",
@@ -130,7 +133,8 @@ func makeInitContainerPods() *v1.Pod {
             NodeName: "testnode",
             Containers: []v1.Container{
                 {
                     Name:  "testcontainer",
+                    Image: "testcontainer:latest",
                     Ports: []v1.ContainerPort{
                         {
                             Name: "testport",
@@ -143,7 +147,8 @@ func makeInitContainerPods() *v1.Pod {
             InitContainers: []v1.Container{
                 {
                     Name:  "initcontainer",
+                    Image: "initcontainer:latest",
                 },
             },
         },
@@ -169,6 +174,7 @@ func expectedPodTargetGroups(ns string) map[string]*targetgroup.Group {
                 {
                     "__address__":                                   "1.2.3.4:9000",
                     "__meta_kubernetes_pod_container_name":          "testcontainer",
+                    "__meta_kubernetes_pod_container_image":         "testcontainer:latest",
                     "__meta_kubernetes_pod_container_port_name":     "testport",
                     "__meta_kubernetes_pod_container_port_number":   "9000",
                     "__meta_kubernetes_pod_container_port_protocol": "TCP",
@@ -219,6 +225,7 @@ func TestPodDiscoveryBeforeRun(t *testing.T) {
                 {
                     "__address__":                                   "1.2.3.4:9000",
                     "__meta_kubernetes_pod_container_name":          "testcontainer0",
+                    "__meta_kubernetes_pod_container_image":         "testcontainer0:latest",
                     "__meta_kubernetes_pod_container_port_name":     "testport0",
                     "__meta_kubernetes_pod_container_port_number":   "9000",
                     "__meta_kubernetes_pod_container_port_protocol": "TCP",
@@ -227,15 +234,17 @@ func TestPodDiscoveryBeforeRun(t *testing.T) {
                 {
                     "__address__":                                   "1.2.3.4:9001",
                     "__meta_kubernetes_pod_container_name":          "testcontainer0",
+                    "__meta_kubernetes_pod_container_image":         "testcontainer0:latest",
                     "__meta_kubernetes_pod_container_port_name":     "testport1",
                     "__meta_kubernetes_pod_container_port_number":   "9001",
                     "__meta_kubernetes_pod_container_port_protocol": "UDP",
                     "__meta_kubernetes_pod_container_init":          "false",
                 },
                 {
                     "__address__":                           "1.2.3.4",
                     "__meta_kubernetes_pod_container_name":  "testcontainer1",
+                    "__meta_kubernetes_pod_container_image": "testcontainer1:latest",
                     "__meta_kubernetes_pod_container_init":  "false",
                 },
             },
             Labels: model.LabelSet{
@@ -267,9 +276,10 @@ func TestPodDiscoveryInitContainer(t *testing.T) {
     key := fmt.Sprintf("pod/%s/testpod", ns)
     expected := expectedPodTargetGroups(ns)
     expected[key].Targets = append(expected[key].Targets, model.LabelSet{
         "__address__":                           "1.2.3.4",
         "__meta_kubernetes_pod_container_name":  "initcontainer",
+        "__meta_kubernetes_pod_container_image": "initcontainer:latest",
         "__meta_kubernetes_pod_container_init":  "true",
     })
     expected[key].Labels["__meta_kubernetes_pod_phase"] = "Pending"
     expected[key].Labels["__meta_kubernetes_pod_ready"] = "false"
@@ -329,7 +339,8 @@ func TestPodDiscoveryUpdate(t *testing.T) {
             NodeName: "testnode",
             Containers: []v1.Container{
                 {
                     Name:  "testcontainer",
+                    Image: "testcontainer:latest",
                     Ports: []v1.ContainerPort{
                         {
                             Name: "testport",

View file

@@ -150,6 +150,7 @@ const (
     serviceAnnotationPrefix        = metaLabelPrefix + "service_annotation_"
     serviceAnnotationPresentPrefix = metaLabelPrefix + "service_annotationpresent_"
     servicePortNameLabel           = metaLabelPrefix + "service_port_name"
+    servicePortNumberLabel         = metaLabelPrefix + "service_port_number"
     servicePortProtocolLabel       = metaLabelPrefix + "service_port_protocol"
     serviceClusterIPLabel          = metaLabelPrefix + "service_cluster_ip"
     serviceExternalNameLabel       = metaLabelPrefix + "service_external_name"
@@ -189,6 +190,7 @@ func (s *Service) buildService(svc *apiv1.Service) *targetgroup.Group {
         labelSet := model.LabelSet{
             model.AddressLabel:       lv(addr),
             servicePortNameLabel:     lv(port.Name),
+            servicePortNumberLabel:   lv(strconv.FormatInt(int64(port.Port), 10)),
             servicePortProtocolLabel: lv(string(port.Protocol)),
             serviceType:              lv(string(svc.Spec.Type)),
         }

View file

@@ -113,10 +113,11 @@ func TestServiceDiscoveryAdd(t *testing.T) {
             Targets: []model.LabelSet{
                 {
                     "__meta_kubernetes_service_port_protocol": "TCP",
                     "__address__":                             "testservice.default.svc:30900",
                     "__meta_kubernetes_service_type":          "ClusterIP",
                     "__meta_kubernetes_service_cluster_ip":    "10.0.0.1",
                     "__meta_kubernetes_service_port_name":     "testport",
+                    "__meta_kubernetes_service_port_number":   "30900",
                 },
             },
             Labels: model.LabelSet{
@@ -132,6 +133,7 @@ func TestServiceDiscoveryAdd(t *testing.T) {
                     "__address__":                             "testservice-external.default.svc:31900",
                     "__meta_kubernetes_service_type":          "ExternalName",
                     "__meta_kubernetes_service_port_name":     "testport",
+                    "__meta_kubernetes_service_port_number":   "31900",
                     "__meta_kubernetes_service_external_name": "FooExternalName",
                 },
             },
@@ -178,17 +180,19 @@ func TestServiceDiscoveryUpdate(t *testing.T) {
             Targets: []model.LabelSet{
                 {
                     "__meta_kubernetes_service_port_protocol": "TCP",
                     "__address__":                             "testservice.default.svc:30900",
                     "__meta_kubernetes_service_type":          "ClusterIP",
                     "__meta_kubernetes_service_cluster_ip":    "10.0.0.1",
                     "__meta_kubernetes_service_port_name":     "testport0",
+                    "__meta_kubernetes_service_port_number":   "30900",
                 },
                 {
                     "__meta_kubernetes_service_port_protocol": "UDP",
                     "__address__":                             "testservice.default.svc:30901",
                     "__meta_kubernetes_service_type":          "ClusterIP",
                     "__meta_kubernetes_service_cluster_ip":    "10.0.0.1",
                     "__meta_kubernetes_service_port_name":     "testport1",
+                    "__meta_kubernetes_service_port_number":   "30901",
                 },
             },
             Labels: model.LabelSet{
@@ -223,10 +227,11 @@ func TestServiceDiscoveryNamespaces(t *testing.T) {
             Targets: []model.LabelSet{
                 {
                     "__meta_kubernetes_service_port_protocol": "TCP",
                     "__address__":                             "testservice.ns1.svc:30900",
                     "__meta_kubernetes_service_type":          "ClusterIP",
                     "__meta_kubernetes_service_cluster_ip":    "10.0.0.1",
                     "__meta_kubernetes_service_port_name":     "testport",
+                    "__meta_kubernetes_service_port_number":   "30900",
                 },
             },
             Labels: model.LabelSet{
@@ -239,10 +244,11 @@ func TestServiceDiscoveryNamespaces(t *testing.T) {
             Targets: []model.LabelSet{
                 {
                     "__meta_kubernetes_service_port_protocol": "TCP",
                     "__address__":                             "testservice.ns2.svc:30900",
                     "__meta_kubernetes_service_type":          "ClusterIP",
                     "__meta_kubernetes_service_cluster_ip":    "10.0.0.1",
                     "__meta_kubernetes_service_port_name":     "testport",
+                    "__meta_kubernetes_service_port_number":   "30900",
                 },
             },
             Labels: model.LabelSet{
@@ -273,10 +279,11 @@ func TestServiceDiscoveryOwnNamespace(t *testing.T) {
             Targets: []model.LabelSet{
                 {
                     "__meta_kubernetes_service_port_protocol": "TCP",
                     "__address__":                             "testservice.own-ns.svc:30900",
                     "__meta_kubernetes_service_type":          "ClusterIP",
                     "__meta_kubernetes_service_cluster_ip":    "10.0.0.1",
                     "__meta_kubernetes_service_port_name":     "testport",
+                    "__meta_kubernetes_service_port_number":   "30900",
                 },
             },
             Labels: model.LabelSet{
@@ -307,10 +314,11 @@ func TestServiceDiscoveryAllNamespaces(t *testing.T) {
             Targets: []model.LabelSet{
                 {
                     "__meta_kubernetes_service_port_protocol": "TCP",
                     "__address__":                             "testservice.own-ns.svc:30900",
                     "__meta_kubernetes_service_type":          "ClusterIP",
                     "__meta_kubernetes_service_cluster_ip":    "10.0.0.1",
                     "__meta_kubernetes_service_port_name":     "testport",
+                    "__meta_kubernetes_service_port_number":   "30900",
                 },
             },
             Labels: model.LabelSet{
@@ -323,10 +331,11 @@ func TestServiceDiscoveryAllNamespaces(t *testing.T) {
             Targets: []model.LabelSet{
                 {
                     "__meta_kubernetes_service_port_protocol": "TCP",
                     "__address__":                             "testservice.non-own-ns.svc:30900",
                     "__meta_kubernetes_service_type":          "ClusterIP",
                     "__meta_kubernetes_service_cluster_ip":    "10.0.0.1",
                     "__meta_kubernetes_service_port_name":     "testport",
+                    "__meta_kubernetes_service_port_number":   "30900",
                 },
             },
             Labels: model.LabelSet{

View file

@@ -159,10 +159,10 @@ func TestMarathonSDRemoveApp(t *testing.T) {
     tg2 := tgs[0]
     if tg2.Source != tg1.Source {
-        t.Fatalf("Source is different: %s != %s", tg1.Source, tg2.Source)
         if len(tg2.Targets) > 0 {
-            t.Fatalf("Got a non-empty target set: %s", tg2.Targets)
+            t.Errorf("Got a non-empty target set: %s", tg2.Targets)
         }
+        t.Fatalf("Source is different: %s != %s", tg1.Source, tg2.Source)
     }
 }

View file

@@ -623,7 +623,7 @@ metadata and a single tag).
 DigitalOcean SD configurations allow retrieving scrape targets from [DigitalOcean's](https://www.digitalocean.com/)
 Droplets API.
 This service discovery uses the public IPv4 address by default, by that can be
-changed with relabelling, as demonstrated in [the Prometheus digitalocean-sd
+changed with relabeling, as demonstrated in [the Prometheus digitalocean-sd
 configuration file](/documentation/examples/prometheus-digitalocean.yml).

 The following meta labels are available on targets during [relabeling](#relabel_config):
@@ -961,8 +961,8 @@ A DNS-based service discovery configuration allows specifying a set of DNS
 domain names which are periodically queried to discover a list of targets. The
 DNS servers to be contacted are read from `/etc/resolv.conf`.

-This service discovery method only supports basic DNS A, AAAA and SRV record
-queries, but not the advanced DNS-SD approach specified in
+This service discovery method only supports basic DNS A, AAAA, MX and SRV
+record queries, but not the advanced DNS-SD approach specified in
 [RFC6763](https://tools.ietf.org/html/rfc6763).

 The following meta labels are available on targets during [relabeling](#relabel_config):
@@ -970,13 +970,14 @@ The following meta labels are available on targets during [relabeling](#relabel_
 * `__meta_dns_name`: the record name that produced the discovered target.
 * `__meta_dns_srv_record_target`: the target field of the SRV record
 * `__meta_dns_srv_record_port`: the port field of the SRV record
+* `__meta_dns_mx_record_target`: the target field of the MX record

 ```yaml
 # A list of DNS domain names to be queried.
 names:
   [ - <string> ]

-# The type of DNS query to perform. One of SRV, A, or AAAA.
+# The type of DNS query to perform. One of SRV, A, AAAA or MX.
 [ type: <string> | default = 'SRV' ]

 # The port number used if the query type is not SRV.
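For illustration, a minimal sketch of a `dns_sd_configs` block using the new MX record type (the queried domain, job name, and label name are hypothetical):

```yaml
scrape_configs:
  - job_name: "mx-hosts"
    dns_sd_configs:
      - names: ["example.com"]
        type: MX
        port: 25  # a port is required for all record types except SRV
    relabel_configs:
      # Surface the discovered MX host as a regular label.
      - source_labels: [__meta_dns_mx_record_target]
        target_label: mx_target
```

Each discovered target gets the MX host joined with the configured port as its `__address__`, mirroring the `hostPort(addr.Mx, d.port)` call in the code change above.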
@@ -992,6 +993,11 @@ EC2 SD configurations allow retrieving scrape targets from AWS EC2
 instances. The private IP address is used by default, but may be changed to
 the public IP address with relabeling.

+The IAM credentials used must have the `ec2:DescribeInstances` permission to
+discover scrape targets, and may optionally have the
+`ec2:DescribeAvailabilityZones` permission if you want the availability zone ID
+available as a label (see below).
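A hedged sketch of the relabeling mentioned above, switching scrape addresses from the default private IP to the public IP (the region and port values are assumptions, not part of this commit):

```yaml
scrape_configs:
  - job_name: "ec2"
    ec2_sd_configs:
      - region: us-east-1  # assumed region
        port: 9100         # assumed exporter port
    relabel_configs:
      # Use the public IP as the scrape address, keeping the port.
      - source_labels: [__meta_ec2_public_ip]
        regex: (.+)  # skips instances without a public IP
        target_label: __address__
        replacement: "$1:9100"
```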
 The following meta labels are available on targets during [relabeling](#relabel_config):

 * `__meta_ec2_ami`: the EC2 Amazon Machine Image
@@ -1269,6 +1275,7 @@ changes resulting in well-formed target groups are applied.
 Files must contain a list of static configs, using these formats:

 **JSON**
+
 ```json
 [
   {
@@ -1282,6 +1289,7 @@ Files must contain a list of static configs, using these formats:
 ```

 **YAML**
+
 ```yaml
 - targets:
   [ - '<host>' ]
@@ -1675,6 +1683,7 @@ Available meta labels:
 * `__meta_kubernetes_service_labelpresent_<labelname>`: `true` for each label of the service object.
 * `__meta_kubernetes_service_name`: The name of the service object.
 * `__meta_kubernetes_service_port_name`: Name of the service port for the target.
+* `__meta_kubernetes_service_port_number`: Number of the service port for the target.
 * `__meta_kubernetes_service_port_protocol`: Protocol of the service port for the target.
 * `__meta_kubernetes_service_type`: The type of the service.
@@ -1992,7 +2001,7 @@ See below for the configuration options for Lightsail discovery:
 Linode SD configurations allow retrieving scrape targets from [Linode's](https://www.linode.com/)
 Linode APIv4.
 This service discovery uses the public IPv4 address by default, by that can be
-changed with relabelling, as demonstrated in [the Prometheus linode-sd
+changed with relabeling, as demonstrated in [the Prometheus linode-sd
 configuration file](/documentation/examples/prometheus-linode.yml).

 The following meta labels are available on targets during [relabeling](#relabel_config):
@@ -2461,7 +2470,7 @@ The following meta labels are available on targets during [relabeling](#relabel_
 * `__meta_scaleway_instance_zone`: the zone of the server (ex: `fr-par-1`, complete list [here](https://developers.scaleway.com/en/products/instance/api/#introduction))

 This role uses the private IPv4 address by default. This can be
-changed with relabelling, as demonstrated in [the Prometheus scaleway-sd
+changed with relabeling, as demonstrated in [the Prometheus scaleway-sd
 configuration file](/documentation/examples/prometheus-scaleway.yml).

 #### Baremetal role
@@ -2479,7 +2488,7 @@ configuration file](/documentation/examples/prometheus-scaleway.yml).
 * `__meta_scaleway_baremetal_zone`: the zone of the server (ex: `fr-par-1`, complete list [here](https://developers.scaleway.com/en/products/instance/api/#introduction))

 This role uses the public IPv4 address by default. This can be
-changed with relabelling, as demonstrated in [the Prometheus scaleway-sd
+changed with relabeling, as demonstrated in [the Prometheus scaleway-sd
 configuration file](/documentation/examples/prometheus-scaleway.yml).

 See below for the configuration options for Scaleway discovery:
@@ -2616,7 +2625,7 @@ for a practical example on how to set up Uyuni Prometheus configuration.
 Vultr SD configurations allow retrieving scrape targets from [Vultr](https://www.vultr.com/).
 This service discovery uses the main IPv4 address by default, which that be
-changed with relabelling, as demonstrated in [the Prometheus vultr-sd
+changed with relabeling, as demonstrated in [the Prometheus vultr-sd
 configuration file](/documentation/examples/prometheus-vultr.yml).

 The following meta labels are available on targets during [relabeling](#relabel_config):

View file

@ -90,7 +90,7 @@ http_server_config:
# https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Content-Type-Options # https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Content-Type-Options
[ X-Content-Type-Options: <string> ] [ X-Content-Type-Options: <string> ]
# Set the X-XSS-Protection header to all responses. # Set the X-XSS-Protection header to all responses.
# Unset if blank. Accepted value is nosniff. # Unset if blank.
# https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-XSS-Protection # https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-XSS-Protection
[ X-XSS-Protection: <string> ] [ X-XSS-Protection: <string> ]
# Set the Strict-Transport-Security header to HTTP responses. # Set the Strict-Transport-Security header to HTTP responses.
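A hedged `web-config.yml` sketch setting these headers (the values shown are common hardening choices, not mandated defaults):

```yaml
http_server_config:
  headers:
    # Sent on every HTTP response served by Prometheus.
    X-Content-Type-Options: nosniff
    X-XSS-Protection: "1; mode=block"
    Strict-Transport-Security: "max-age=63072000"
```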

View file

@ -51,13 +51,14 @@ If functions are used in a pipeline, the pipeline value is passed as the last ar
### Numbers ### Numbers
| Name | Arguments | Returns | Notes | | Name | Arguments | Returns | Notes |
| ------------------ | -----------------| --------| --------- | |---------------------| -----------------| --------| --------- |
| humanize | number or string | string | Converts a number to a more readable format, using [metric prefixes](https://en.wikipedia.org/wiki/Metric_prefix). | | humanize | number or string | string | Converts a number to a more readable format, using [metric prefixes](https://en.wikipedia.org/wiki/Metric_prefix). |
| humanize1024 | number or string | string | Like `humanize`, but uses 1024 as the base rather than 1000. | | humanize1024 | number or string | string | Like `humanize`, but uses 1024 as the base rather than 1000. |
| humanizeDuration | number or string | string | Converts a duration in seconds to a more readable format. | | humanizeDuration | number or string | string | Converts a duration in seconds to a more readable format. |
| humanizePercentage | number or string | string | Converts a ratio value to a fraction of 100. | | humanizePercentage | number or string | string | Converts a ratio value to a fraction of 100. |
| humanizeTimestamp | number or string | string | Converts a Unix timestamp in seconds to a more readable format. | | humanizeTimestamp | number or string | string | Converts a Unix timestamp in seconds to a more readable format. |
| toTime | number or string | *time.Time | Converts a Unix timestamp in seconds to a time.Time. |
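A few illustrative invocations of the functions above (outputs are approximate; the humanizing functions round via Go's `%.4g` formatting):

```
{{ humanize 1234567.0 }}        -> 1.235M
{{ humanizeDuration 98.6 }}     -> 1m 38.6s
{{ humanizePercentage 0.42 }}   -> 42%
```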
Humanizing functions are intended to produce reasonable output for consumption Humanizing functions are intended to produce reasonable output for consumption
by humans, and are not guaranteed to return the same results between Prometheus by humans, and are not guaranteed to return the same results between Prometheus

View file

@ -94,3 +94,12 @@ computed at all.
`--enable-feature=auto-gomaxprocs` `--enable-feature=auto-gomaxprocs`
When enabled, the GOMAXPROCS variable is automatically set to match the Linux container CPU quota. When enabled, the GOMAXPROCS variable is automatically set to match the Linux container CPU quota.
## No default scrape port
`--enable-feature=no-default-scrape-port`
When enabled, the default ports for HTTP (`:80`) or HTTPS (`:443`) will _not_ be added to
the address used to scrape a target (the value of the `__address__` label), contrary to the default behavior.
In addition, if a default HTTP or HTTPS port has already been added either in a static configuration or
by a service discovery mechanism and the respective scheme is specified (`http` or `https`), that port will be removed.
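For example (illustrative, assuming scheme `http`):

```
configured target      resulting __address__
example.com         -> example.com          (no :80 appended)
example.com:80      -> example.com          (default port stripped)
example.com:8080    -> example.com:8080     (non-default port kept)
```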

View file

@ -206,6 +206,35 @@ $ curl 'http://localhost:9090/api/v1/query_range?query=up&start=2015-07-01T20:10
} }
``` ```
## Formatting query expressions
The following endpoint formats a PromQL expression in a prettified way:
```
GET /api/v1/format_query
POST /api/v1/format_query
```
URL query parameters:
- `query=<string>`: Prometheus expression query string.
You can URL-encode these parameters directly in the request body by using the `POST` method and
`Content-Type: application/x-www-form-urlencoded` header. This is useful when specifying a large
query that may breach server-side URL character limits.
The `data` section of the query result is a string containing the formatted query expression. Note that any comments are removed in the formatted string.
The following example formats the expression `foo/bar`:
```json
$ curl 'http://localhost:9090/api/v1/format_query?query=foo/bar'
{
"status" : "success",
"data" : "foo / bar"
}
```
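The same expression can be formatted via `POST` with a URL-encoded body, which is handy for long expressions:

```json
$ curl -X POST --data-urlencode 'query=foo/bar' http://localhost:9090/api/v1/format_query
{
   "status" : "success",
   "data" : "foo / bar"
}
```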
## Querying metadata ## Querying metadata
Prometheus offers a set of API endpoints to query metadata about series and their labels. Prometheus offers a set of API endpoints to query metadata about series and their labels.
@ -476,8 +505,8 @@ GET /api/v1/targets
``` ```
Both the active and dropped targets are part of the response by default. Both the active and dropped targets are part of the response by default.
`labels` represents the label set after relabelling has occurred. `labels` represents the label set after relabeling has occurred.
`discoveredLabels` represents the unmodified labels retrieved during service discovery before relabelling has occurred. `discoveredLabels` represents the unmodified labels retrieved during service discovery before relabeling has occurred.
```json ```json
$ curl http://localhost:9090/api/v1/targets $ curl http://localhost:9090/api/v1/targets

View file

@ -8,7 +8,7 @@ require (
github.com/golang/snappy v0.0.4 github.com/golang/snappy v0.0.4
github.com/influxdata/influxdb v1.9.8 github.com/influxdata/influxdb v1.9.8
github.com/prometheus/client_golang v1.12.2 github.com/prometheus/client_golang v1.12.2
github.com/prometheus/common v0.36.0 github.com/prometheus/common v0.37.0
github.com/stretchr/testify v1.8.0 github.com/stretchr/testify v1.8.0
gopkg.in/alecthomas/kingpin.v2 v2.2.6 gopkg.in/alecthomas/kingpin.v2 v2.2.6
) )

View file

@ -1013,8 +1013,8 @@ github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9
github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
github.com/prometheus/common v0.35.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= github.com/prometheus/common v0.35.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA=
github.com/prometheus/common v0.36.0 h1:78hJTing+BLYLjhXE+Z2BubeEymH5Lr0/Mt8FKkxxYo= github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE=
github.com/prometheus/common v0.36.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA=
github.com/prometheus/common/assets v0.2.0/go.mod h1:D17UVUE12bHbim7HzwUvtqm6gwBEaDQ0F+hIGbFbccI= github.com/prometheus/common/assets v0.2.0/go.mod h1:D17UVUE12bHbim7HzwUvtqm6gwBEaDQ0F+hIGbFbccI=
github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4= github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4=
github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI= github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI=

View file

@ -320,7 +320,7 @@
{ {
alert: 'PrometheusHighQueryLoad', alert: 'PrometheusHighQueryLoad',
expr: ||| expr: |||
avg_over_time(prometheus_engine_queries{job="prometheus-k8s"}[5m]) / max_over_time(prometheus_engine_queries_concurrent_max{job="prometheus-k8s"}[5m]) > 0.8 avg_over_time(prometheus_engine_queries{%(prometheusSelector)s}[5m]) / max_over_time(prometheus_engine_queries_concurrent_max{%(prometheusSelector)s}[5m]) > 0.8
||| % $._config, ||| % $._config,
'for': '15m', 'for': '15m',
labels: { labels: {

go.mod
View file

@ -4,14 +4,14 @@ go 1.17
require ( require (
github.com/Azure/azure-sdk-for-go v65.0.0+incompatible github.com/Azure/azure-sdk-for-go v65.0.0+incompatible
github.com/Azure/go-autorest/autorest v0.11.27 github.com/Azure/go-autorest/autorest v0.11.28
github.com/Azure/go-autorest/autorest/adal v0.9.20 github.com/Azure/go-autorest/autorest/adal v0.9.21
github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137
github.com/aws/aws-sdk-go v1.44.47 github.com/aws/aws-sdk-go v1.44.67
github.com/cespare/xxhash/v2 v2.1.2 github.com/cespare/xxhash/v2 v2.1.2
github.com/dennwc/varint v1.0.0 github.com/dennwc/varint v1.0.0
github.com/dgryski/go-sip13 v0.0.0-20200911182023-62edffca9245 github.com/dgryski/go-sip13 v0.0.0-20200911182023-62edffca9245
github.com/digitalocean/godo v1.81.0 github.com/digitalocean/godo v1.82.0
github.com/docker/docker v20.10.17+incompatible github.com/docker/docker v20.10.17+incompatible
github.com/edsrzf/mmap-go v1.1.0 github.com/edsrzf/mmap-go v1.1.0
github.com/envoyproxy/go-control-plane v0.10.3 github.com/envoyproxy/go-control-plane v0.10.3
@ -19,18 +19,18 @@ require (
github.com/fsnotify/fsnotify v1.5.4 github.com/fsnotify/fsnotify v1.5.4
github.com/go-kit/log v0.2.1 github.com/go-kit/log v0.2.1
github.com/go-logfmt/logfmt v0.5.1 github.com/go-logfmt/logfmt v0.5.1
github.com/go-openapi/strfmt v0.21.2 github.com/go-openapi/strfmt v0.21.3
github.com/go-zookeeper/zk v1.0.2 github.com/go-zookeeper/zk v1.0.3
github.com/gogo/protobuf v1.3.2 github.com/gogo/protobuf v1.3.2
github.com/golang/snappy v0.0.4 github.com/golang/snappy v0.0.4
github.com/google/pprof v0.0.0-20220608213341-c488b8fa1db3 github.com/google/pprof v0.0.0-20220608213341-c488b8fa1db3
github.com/gophercloud/gophercloud v0.25.0 github.com/gophercloud/gophercloud v0.25.0
github.com/grafana/regexp v0.0.0-20220304095617-2e8d9baf4ac2 github.com/grafana/regexp v0.0.0-20220304095617-2e8d9baf4ac2
github.com/grpc-ecosystem/grpc-gateway v1.16.0 github.com/grpc-ecosystem/grpc-gateway v1.16.0
github.com/hashicorp/consul/api v1.13.0 github.com/hashicorp/consul/api v1.13.1
github.com/hashicorp/nomad/api v0.0.0-20220629141207-c2428e1673ec github.com/hashicorp/nomad/api v0.0.0-20220629141207-c2428e1673ec
github.com/hetznercloud/hcloud-go v1.35.0 github.com/hetznercloud/hcloud-go v1.35.2
github.com/ionos-cloud/sdk-go/v6 v6.1.0 github.com/ionos-cloud/sdk-go/v6 v6.1.2
github.com/json-iterator/go v1.1.12 github.com/json-iterator/go v1.1.12
github.com/kolo/xmlrpc v0.0.0-20201022064351-38db28db192b github.com/kolo/xmlrpc v0.0.0-20201022064351-38db28db192b
github.com/linode/linodego v1.8.0 github.com/linode/linodego v1.8.0
@ -40,9 +40,9 @@ require (
github.com/oklog/ulid v1.3.1 github.com/oklog/ulid v1.3.1
github.com/pkg/errors v0.9.1 github.com/pkg/errors v0.9.1
github.com/prometheus/alertmanager v0.24.0 github.com/prometheus/alertmanager v0.24.0
github.com/prometheus/client_golang v1.12.2 github.com/prometheus/client_golang v1.13.0
github.com/prometheus/client_model v0.2.0 github.com/prometheus/client_model v0.2.0
github.com/prometheus/common v0.35.0 github.com/prometheus/common v0.37.0
github.com/prometheus/common/assets v0.2.0 github.com/prometheus/common/assets v0.2.0
github.com/prometheus/common/sigv4 v0.1.0 github.com/prometheus/common/sigv4 v0.1.0
github.com/prometheus/exporter-toolkit v0.7.1 github.com/prometheus/exporter-toolkit v0.7.1
@ -50,32 +50,32 @@ require (
github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749 github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749
github.com/stretchr/testify v1.8.0 github.com/stretchr/testify v1.8.0
github.com/vultr/govultr/v2 v2.17.2 github.com/vultr/govultr/v2 v2.17.2
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.32.0 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.34.0
go.opentelemetry.io/otel v1.7.0 go.opentelemetry.io/otel v1.9.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.7.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.9.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.7.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.9.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.7.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.9.0
go.opentelemetry.io/otel/sdk v1.7.0 go.opentelemetry.io/otel/sdk v1.9.0
go.opentelemetry.io/otel/trace v1.7.0 go.opentelemetry.io/otel/trace v1.9.0
go.uber.org/atomic v1.9.0 go.uber.org/atomic v1.9.0
go.uber.org/automaxprocs v1.5.1 go.uber.org/automaxprocs v1.5.1
go.uber.org/goleak v1.1.12 go.uber.org/goleak v1.1.12
golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e golang.org/x/net v0.0.0-20220802222814-0bcc04d9c69b
golang.org/x/oauth2 v0.0.0-20220628200809-02e64fa58f26 golang.org/x/oauth2 v0.0.0-20220722155238-128564f6959c
golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4
golang.org/x/sys v0.0.0-20220627191245-f75cf1eec38b golang.org/x/sys v0.0.0-20220731174439-a90be440212d
golang.org/x/time v0.0.0-20220609170525-579cf78fd858 golang.org/x/time v0.0.0-20220609170525-579cf78fd858
golang.org/x/tools v0.1.11 golang.org/x/tools v0.1.12
google.golang.org/api v0.86.0 google.golang.org/api v0.91.0
google.golang.org/genproto v0.0.0-20220628213854-d9e0b6570c03 google.golang.org/genproto v0.0.0-20220802133213-ce4fa296bf78
google.golang.org/grpc v1.47.0 google.golang.org/grpc v1.48.0
google.golang.org/protobuf v1.28.0 google.golang.org/protobuf v1.28.1
gopkg.in/alecthomas/kingpin.v2 v2.2.6 gopkg.in/alecthomas/kingpin.v2 v2.2.6
gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v2 v2.4.0
gopkg.in/yaml.v3 v3.0.1 gopkg.in/yaml.v3 v3.0.1
k8s.io/api v0.24.2 k8s.io/api v0.24.3
k8s.io/apimachinery v0.24.2 k8s.io/apimachinery v0.24.3
k8s.io/client-go v0.24.2 k8s.io/client-go v0.24.3
k8s.io/klog v1.0.0 k8s.io/klog v1.0.0
k8s.io/klog/v2 v2.70.0 k8s.io/klog/v2 v2.70.0
) )
@ -101,7 +101,7 @@ require (
github.com/docker/distribution v2.7.1+incompatible // indirect github.com/docker/distribution v2.7.1+incompatible // indirect
github.com/docker/go-connections v0.4.0 // indirect github.com/docker/go-connections v0.4.0 // indirect
github.com/docker/go-units v0.4.0 // indirect github.com/docker/go-units v0.4.0 // indirect
github.com/emicklei/go-restful v2.9.5+incompatible // indirect github.com/emicklei/go-restful v2.16.0+incompatible // indirect
github.com/evanphx/json-patch v4.12.0+incompatible // indirect github.com/evanphx/json-patch v4.12.0+incompatible // indirect
github.com/fatih/color v1.13.0 // indirect github.com/fatih/color v1.13.0 // indirect
github.com/felixge/httpsnoop v1.0.3 // indirect github.com/felixge/httpsnoop v1.0.3 // indirect
@ -118,7 +118,6 @@ require (
github.com/go-openapi/swag v0.21.1 // indirect github.com/go-openapi/swag v0.21.1 // indirect
github.com/go-openapi/validate v0.21.0 // indirect github.com/go-openapi/validate v0.21.0 // indirect
github.com/go-resty/resty/v2 v2.1.1-0.20191201195748-d7b97669fe48 // indirect github.com/go-resty/resty/v2 v2.1.1-0.20191201195748-d7b97669fe48 // indirect
github.com/go-stack/stack v1.8.1 // indirect
github.com/golang-jwt/jwt/v4 v4.2.0 // indirect github.com/golang-jwt/jwt/v4 v4.2.0 // indirect
github.com/golang/glog v1.0.0 // indirect github.com/golang/glog v1.0.0 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
@ -131,7 +130,7 @@ require (
github.com/googleapis/enterprise-certificate-proxy v0.1.0 // indirect github.com/googleapis/enterprise-certificate-proxy v0.1.0 // indirect
github.com/googleapis/gax-go/v2 v2.4.0 // indirect github.com/googleapis/gax-go/v2 v2.4.0 // indirect
github.com/gorilla/websocket v1.4.2 // indirect github.com/gorilla/websocket v1.4.2 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.10.2 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.1 // indirect
github.com/hashicorp/cronexpr v1.1.1 // indirect github.com/hashicorp/cronexpr v1.1.1 // indirect
github.com/hashicorp/go-cleanhttp v0.5.2 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
github.com/hashicorp/go-hclog v0.12.2 // indirect github.com/hashicorp/go-hclog v0.12.2 // indirect
@ -162,15 +161,15 @@ require (
github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.0.2 // indirect github.com/opencontainers/image-spec v1.0.2 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/prometheus/procfs v0.7.3 // indirect github.com/prometheus/procfs v0.8.0 // indirect
github.com/sirupsen/logrus v1.8.1 // indirect github.com/sirupsen/logrus v1.8.1 // indirect
github.com/spf13/pflag v1.0.5 // indirect github.com/spf13/pflag v1.0.5 // indirect
go.mongodb.org/mongo-driver v1.8.3 // indirect go.mongodb.org/mongo-driver v1.10.0 // indirect
go.opencensus.io v0.23.0 // indirect go.opencensus.io v0.23.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.7.0 // indirect go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.9.0 // indirect
go.opentelemetry.io/otel/metric v0.30.0 // indirect go.opentelemetry.io/otel/metric v0.31.0 // indirect
go.opentelemetry.io/proto/otlp v0.16.0 // indirect go.opentelemetry.io/proto/otlp v0.18.0 // indirect
golang.org/x/crypto v0.0.0-20220511200225-c6db032c6c88 // indirect golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa // indirect
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 // indirect golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 // indirect
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect
golang.org/x/text v0.3.7 // indirect golang.org/x/text v0.3.7 // indirect

go.sum
View file

@ -62,12 +62,12 @@ github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg6
github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA= github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA=
github.com/Azure/go-autorest/autorest v0.11.27 h1:F3R3q42aWytozkV8ihzcgMO4OA4cuqr3bNlsEuF6//A= github.com/Azure/go-autorest/autorest v0.11.28 h1:ndAExarwr5Y+GaHE6VCaY1kyS/HwwGGyuimVhWsHOEM=
github.com/Azure/go-autorest/autorest v0.11.27/go.mod h1:7l8ybrIdUmGqZMTD0sRtAr8NvbHjfofbf8RSP2q7w7U= github.com/Azure/go-autorest/autorest v0.11.28/go.mod h1:MrkzG3Y3AH668QyF9KRk5neJnGgmhQ6krbhR8Q5eMvA=
github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M=
github.com/Azure/go-autorest/autorest/adal v0.9.18/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ= github.com/Azure/go-autorest/autorest/adal v0.9.18/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ=
github.com/Azure/go-autorest/autorest/adal v0.9.20 h1:gJ3E98kMpFB1MFqQCvA1yFab8vthOeD4VlFRQULxahg= github.com/Azure/go-autorest/autorest/adal v0.9.21 h1:jjQnVFXPfekaqb8vIsv2G1lxshoW+oGv4MDlhRtnYZk=
github.com/Azure/go-autorest/autorest/adal v0.9.20/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ= github.com/Azure/go-autorest/autorest/adal v0.9.21/go.mod h1:zua7mBUaCc5YnSLKYgGJR/w5ePdMDA6H56upLsHzA9U=
github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw=
github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=
github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
@ -124,8 +124,8 @@ github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQ
github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
github.com/aws/aws-sdk-go v1.43.11/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= github.com/aws/aws-sdk-go v1.43.11/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
github.com/aws/aws-sdk-go v1.44.47 h1:uyiNvoR4wfZ8Bp4ghgbyzGFIg5knjZMUAd5S9ba9qNU= github.com/aws/aws-sdk-go v1.44.67 h1:+nxfXbMe8QUB6svLsuLYsp+WhZBKM26w62Zidir739A=
github.com/aws/aws-sdk-go v1.44.47/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= github.com/aws/aws-sdk-go v1.44.67/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
@ -183,8 +183,8 @@ github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgz
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgryski/go-sip13 v0.0.0-20200911182023-62edffca9245 h1:9cOfvEwjQxdwKuNDTQSaMKNRvwKwgZG+U4HrjeRKHso= github.com/dgryski/go-sip13 v0.0.0-20200911182023-62edffca9245 h1:9cOfvEwjQxdwKuNDTQSaMKNRvwKwgZG+U4HrjeRKHso=
github.com/dgryski/go-sip13 v0.0.0-20200911182023-62edffca9245/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/dgryski/go-sip13 v0.0.0-20200911182023-62edffca9245/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/digitalocean/godo v1.81.0 h1:sjb3fOfPfSlUQUK22E87BcI8Zx2qtnF7VUCCO4UK3C8= github.com/digitalocean/godo v1.82.0 h1:lqAit46H1CqJGjh7LDbsamng/UMBME5rvmfH3Vb5Yy8=
github.com/digitalocean/godo v1.81.0/go.mod h1:BPCqvwbjbGqxuUnIKB4EvS/AX7IDnNmt5fwvIkWo+ew= github.com/digitalocean/godo v1.82.0/go.mod h1:BPCqvwbjbGqxuUnIKB4EvS/AX7IDnNmt5fwvIkWo+ew=
github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=
github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ=
github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug= github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug=
@ -206,8 +206,9 @@ github.com/edsrzf/mmap-go v1.1.0 h1:6EUwBLQ/Mcr1EYLE4Tn1VdW1A4ckqCQWZBw8Hr0kjpQ=
github.com/edsrzf/mmap-go v1.1.0/go.mod h1:19H/e8pUPLicwkyNgOykDXkJ9F0MHE+Z52B8EIth78Q= github.com/edsrzf/mmap-go v1.1.0/go.mod h1:19H/e8pUPLicwkyNgOykDXkJ9F0MHE+Z52B8EIth78Q=
github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/emicklei/go-restful v2.9.5+incompatible h1:spTtZBk5DYEvbxMVutUuTyh1Ao2r4iyvLdACqsl/Ljk=
github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/emicklei/go-restful v2.16.0+incompatible h1:rgqiKNjTnFQA6kkhFe16D8epTksy9HQ1MyrbDXSdYhM=
github.com/emicklei/go-restful v2.16.0+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
@ -230,7 +231,6 @@ github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL
github.com/fatih/color v1.10.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM= github.com/fatih/color v1.10.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM=
github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w=
github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk=
github.com/felixge/httpsnoop v1.0.2/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk=
github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
@ -285,8 +285,9 @@ github.com/go-openapi/spec v0.20.4 h1:O8hJrt0UMnhHcluhIdUgCLRWyM2x7QkBXRvOs7m+O1
github.com/go-openapi/spec v0.20.4/go.mod h1:faYFR1CvsJZ0mNsmsphTMSoRrNV3TEDoAM7FOEWeq8I= github.com/go-openapi/spec v0.20.4/go.mod h1:faYFR1CvsJZ0mNsmsphTMSoRrNV3TEDoAM7FOEWeq8I=
github.com/go-openapi/strfmt v0.21.0/go.mod h1:ZRQ409bWMj+SOgXofQAGTIo2Ebu72Gs+WaRADcS5iNg= github.com/go-openapi/strfmt v0.21.0/go.mod h1:ZRQ409bWMj+SOgXofQAGTIo2Ebu72Gs+WaRADcS5iNg=
github.com/go-openapi/strfmt v0.21.1/go.mod h1:I/XVKeLc5+MM5oPNN7P6urMOpuLXEcNrCX/rPGuWb0k= github.com/go-openapi/strfmt v0.21.1/go.mod h1:I/XVKeLc5+MM5oPNN7P6urMOpuLXEcNrCX/rPGuWb0k=
github.com/go-openapi/strfmt v0.21.2 h1:5NDNgadiX1Vhemth/TH4gCGopWSTdDjxl60H3B7f+os=
github.com/go-openapi/strfmt v0.21.2/go.mod h1:I/XVKeLc5+MM5oPNN7P6urMOpuLXEcNrCX/rPGuWb0k= github.com/go-openapi/strfmt v0.21.2/go.mod h1:I/XVKeLc5+MM5oPNN7P6urMOpuLXEcNrCX/rPGuWb0k=
github.com/go-openapi/strfmt v0.21.3 h1:xwhj5X6CjXEZZHMWy1zKJxvW9AfHC9pkyUjLvHtKG7o=
github.com/go-openapi/strfmt v0.21.3/go.mod h1:k+RzNO0Da+k3FrrynSNN8F7n/peCmQQqbbXjtDfvmGg=
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
@ -305,8 +306,8 @@ github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/me
github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw= github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw=
github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4= github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4=
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
github.com/go-zookeeper/zk v1.0.2 h1:4mx0EYENAdX/B/rbunjlt5+4RTA/a9SMHBRuSKdGxPM= github.com/go-zookeeper/zk v1.0.3 h1:7M2kwOsc//9VeeFiPtf+uSJlVpU66x9Ba5+8XK7/TDg=
github.com/go-zookeeper/zk v1.0.2/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw= github.com/go-zookeeper/zk v1.0.3/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw=
github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0= github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0=
github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY= github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY=
github.com/gobuffalo/depgen v0.1.0/go.mod h1:+ifsuy7fhi15RWncXQQKjWS9JPkdah5sZvtHc2RXGlg= github.com/gobuffalo/depgen v0.1.0/go.mod h1:+ifsuy7fhi15RWncXQQKjWS9JPkdah5sZvtHc2RXGlg=
@ -467,14 +468,14 @@ github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t
github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.10.2 h1:ERKrevVTnCw3Wu4I3mtR15QU3gtWy86cBo6De0jEohg= github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.1 h1:/sDbPb60SusIXjiJGYLUoS/rAQurQmvGWmwn2bBPM9c=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.10.2/go.mod h1:chrfS3YoLAlKTRE5cFWvCbt8uGAjshktT4PveTUpsFQ= github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.1/go.mod h1:G+WkljZi4mflcqVxYSgvt8MNctRQHjEH8ubKtt1Ka3w=
github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE=
github.com/hashicorp/consul/api v1.13.0 h1:2hnLQ0GjQvw7f3O61jMO8gbasZviZTrt9R8WzgiirHc= github.com/hashicorp/consul/api v1.13.1 h1:r5cPdVFUy+pFF7nt+0ArLD9hm+E39OewJkvNdjKXcL4=
github.com/hashicorp/consul/api v1.13.0/go.mod h1:ZlVrynguJKcYr54zGaDbaL3fOvKC9m72FhPvA8T35KQ= github.com/hashicorp/consul/api v1.13.1/go.mod h1:+1VcOos0TVdQFqXxphG4zmGcwQB4KVGkp1maPqnkDpE=
github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
github.com/hashicorp/consul/sdk v0.8.0 h1:OJtKBtEjboEZvG6AOUdh4Z1Zbyu0WcxQ0qatRrZHTVU= github.com/hashicorp/consul/sdk v0.10.0 h1:rGLEh2AWK4K0KCMvqWAz2EYxQqgciIfMagWZ0nVe5MI=
github.com/hashicorp/consul/sdk v0.8.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms= github.com/hashicorp/consul/sdk v0.10.0/go.mod h1:yPkX5Q6CsxTFMjQQDJwzeNmUUF5NUGGbrDsv9wTb8cw=
github.com/hashicorp/cronexpr v1.1.1 h1:NJZDd87hGXjoZBdvyCF9mX4DCq5Wy7+A/w+A7q0wn6c= github.com/hashicorp/cronexpr v1.1.1 h1:NJZDd87hGXjoZBdvyCF9mX4DCq5Wy7+A/w+A7q0wn6c=
github.com/hashicorp/cronexpr v1.1.1/go.mod h1:P4wA0KBl9C5q2hABiMO7cp6jcIg96CDh1Efb3g1PWA4= github.com/hashicorp/cronexpr v1.1.1/go.mod h1:P4wA0KBl9C5q2hABiMO7cp6jcIg96CDh1Efb3g1PWA4=
github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
@ -527,8 +528,8 @@ github.com/hashicorp/nomad/api v0.0.0-20220629141207-c2428e1673ec/go.mod h1:jP79
github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
github.com/hashicorp/serf v0.9.6 h1:uuEX1kLR6aoda1TBttmJQKDLZE1Ob7KN0NPdE7EtCDc= github.com/hashicorp/serf v0.9.6 h1:uuEX1kLR6aoda1TBttmJQKDLZE1Ob7KN0NPdE7EtCDc=
github.com/hashicorp/serf v0.9.6/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4= github.com/hashicorp/serf v0.9.6/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4=
github.com/hetznercloud/hcloud-go v1.35.0 h1:sduXOrWM0/sJXwBty7EQd7+RXEJh5+CsAGQmHshChFg= github.com/hetznercloud/hcloud-go v1.35.2 h1:eEDtmDiI2plZ2UQmj4YpiYse5XbtpXOUBpAdIOLxzgE=
github.com/hetznercloud/hcloud-go v1.35.0/go.mod h1:mepQwR6va27S3UQthaEPGS86jtzSY9xWL1e9dyxXpgA= github.com/hetznercloud/hcloud-go v1.35.2/go.mod h1:mepQwR6va27S3UQthaEPGS86jtzSY9xWL1e9dyxXpgA=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg=
github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho=
@ -540,8 +541,8 @@ github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU=
github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo=
github.com/ionos-cloud/sdk-go/v6 v6.1.0 h1:0EZz5H+t6W23zHt6dgHYkKavr72/30O9nA97E3FZaS4= github.com/ionos-cloud/sdk-go/v6 v6.1.2 h1:es5R5sVmjHFrYNBbJfAeHF+16GheaJMyc63xWxIAec4=
github.com/ionos-cloud/sdk-go/v6 v6.1.0/go.mod h1:Ox3W0iiEz0GHnfY9e5LmAxwklsxguuNFEUSu0gVRTME= github.com/ionos-cloud/sdk-go/v6 v6.1.2/go.mod h1:Ox3W0iiEz0GHnfY9e5LmAxwklsxguuNFEUSu0gVRTME=
github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4= github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4=
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
@ -744,8 +745,8 @@ github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3O
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
github.com/prometheus/client_golang v1.12.2 h1:51L9cDoUHVrXx4zWYlcLQIZ+d+VXHgqnYKkIuq4g/34= github.com/prometheus/client_golang v1.13.0 h1:b71QUfeo5M8gq2+evJdTPfZhYMAU0uKPkyPJ7TPsloU=
github.com/prometheus/client_golang v1.12.2/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= github.com/prometheus/client_golang v1.13.0/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
@ -761,8 +762,8 @@ github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB8
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
github.com/prometheus/common v0.35.0 h1:Eyr+Pw2VymWejHqCugNaQXkAi6KayVNxaHeu6khmFBE= github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE=
github.com/prometheus/common v0.35.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA=
github.com/prometheus/common/assets v0.2.0 h1:0P5OrzoHrYBOSM1OigWL3mY8ZvV2N4zIE/5AahrSrfM= github.com/prometheus/common/assets v0.2.0 h1:0P5OrzoHrYBOSM1OigWL3mY8ZvV2N4zIE/5AahrSrfM=
github.com/prometheus/common/assets v0.2.0/go.mod h1:D17UVUE12bHbim7HzwUvtqm6gwBEaDQ0F+hIGbFbccI= github.com/prometheus/common/assets v0.2.0/go.mod h1:D17UVUE12bHbim7HzwUvtqm6gwBEaDQ0F+hIGbFbccI=
github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4= github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4=
@ -775,8 +776,9 @@ github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsT
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU=
github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo=
github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4=
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
@ -852,7 +854,9 @@ github.com/vultr/govultr/v2 v2.17.2 h1:gej/rwr91Puc/tgh+j33p/BLR16UrIPnSr+AIwYWZ
github.com/vultr/govultr/v2 v2.17.2/go.mod h1:ZFOKGWmgjytfyjeyAdhQlSWwTjh2ig+X49cAp50dzXI= github.com/vultr/govultr/v2 v2.17.2/go.mod h1:ZFOKGWmgjytfyjeyAdhQlSWwTjh2ig+X49cAp50dzXI=
github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI=
github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs= github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs=
github.com/xdg-go/scram v1.1.1/go.mod h1:RaEWvsqvNKKvBPvcKeFjrG2cJqOkHTiyTpzz23ni57g=
github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM= github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM=
github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgklLGvcBnW8=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/xlab/treeprint v1.1.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= github.com/xlab/treeprint v1.1.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0=
github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA=
@ -862,12 +866,14 @@ github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg=
go.mongodb.org/mongo-driver v1.7.3/go.mod h1:NqaYOwnXWr5Pm7AOpO5QFxKJ503nbMse/R79oO62zWg= go.mongodb.org/mongo-driver v1.7.3/go.mod h1:NqaYOwnXWr5Pm7AOpO5QFxKJ503nbMse/R79oO62zWg=
go.mongodb.org/mongo-driver v1.7.5/go.mod h1:VXEWRZ6URJIkUq2SCAyapmhH0ZLRBP+FT4xhp5Zvxng= go.mongodb.org/mongo-driver v1.7.5/go.mod h1:VXEWRZ6URJIkUq2SCAyapmhH0ZLRBP+FT4xhp5Zvxng=
go.mongodb.org/mongo-driver v1.8.3 h1:TDKlTkGDKm9kkJVUOAXDK5/fkqKHJVwYQSpoRfB43R4=
go.mongodb.org/mongo-driver v1.8.3/go.mod h1:0sQWfOeY63QTntERDJJ/0SuKK0T1uVSgKCuAROlKEPY= go.mongodb.org/mongo-driver v1.8.3/go.mod h1:0sQWfOeY63QTntERDJJ/0SuKK0T1uVSgKCuAROlKEPY=
go.mongodb.org/mongo-driver v1.10.0 h1:UtV6N5k14upNp4LTduX0QCufG124fSu25Wz9tu94GLg=
go.mongodb.org/mongo-driver v1.10.0/go.mod h1:wsihk0Kdgv8Kqu1Anit4sfK+22vSFbUrAVEYRhCXrA8=
go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
@ -878,28 +884,30 @@ go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M=
go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.32.0 h1:mac9BKRqwaX6zxHPDe3pvmWpwuuIM0vuXv2juCnQevE= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.34.0 h1:9NkMW03wwEzPtP/KciZ4Ozu/Uz5ZA7kfqXJIObnrjGU=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.32.0/go.mod h1:5eCOqeGphOyz6TsY3ZDNjE33SM/TFAK3RGuCL2naTgY= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.34.0/go.mod h1:548ZsYzmT4PL4zWKRd8q/N4z0Wxzn/ZxUE+lkEpwWQA=
go.opentelemetry.io/otel v1.7.0 h1:Z2lA3Tdch0iDcrhJXDIlC94XE+bxok1F9B+4Lz/lGsM= go.opentelemetry.io/otel v1.8.0/go.mod h1:2pkj+iMj0o03Y+cW6/m8Y4WkRdYN3AvCXCnzRMp9yvM=
go.opentelemetry.io/otel v1.7.0/go.mod h1:5BdUoMIz5WEs0vt0CUEMtSSaTSHBBVwrhnz7+nrD5xk= go.opentelemetry.io/otel v1.9.0 h1:8WZNQFIB2a71LnANS9JeyidJKKGOOremcUtb/OtHISw=
go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.7.0 h1:7Yxsak1q4XrJ5y7XBnNwqWx9amMZvoidCctv62XOQ6Y= go.opentelemetry.io/otel v1.9.0/go.mod h1:np4EoPGzoPs3O67xUVNoPPcmSvsfOxNlNA4F4AC+0Eo=
go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.7.0/go.mod h1:M1hVZHNxcbkAlcvrOMlpQ4YOO3Awf+4N2dxkZL3xm04= go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.9.0 h1:ggqApEjDKczicksfvZUCxuvoyDmR6Sbm56LwiK8DVR0=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.7.0 h1:cMDtmgJ5FpRvqx9x2Aq+Mm0O6K/zcUkH73SFz20TuBw= go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.9.0/go.mod h1:78XhIg8Ht9vR4tbLNUhXsiOnE2HOuSeKAiAcoVQEpOY=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.7.0/go.mod h1:ceUgdyfNv4h4gLxHR0WNfDiiVmZFodZhZSbOLhpxqXE= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.9.0 h1:NN90Cuna0CnBg8YNu1Q0V35i2E8LDByFOwHRCq/ZP9I=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.7.0 h1:MFAyzUPrTwLOwCi+cltN0ZVyy4phU41lwH+lyMyQTS4= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.9.0/go.mod h1:0EsCXjZAiiZGnLdEUXM9YjCKuuLZMYyglh2QDXcYKVA=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.7.0/go.mod h1:E+/KKhwOSw8yoPxSSuUHG6vKppkvhN+S1Jc7Nib3k3o= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.9.0 h1:M0/hqGuJBLeIEu20f89H74RGtqV2dn+SFWEz9ATAAwY=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.7.0 h1:pLP0MH4MAqeTEV0g/4flxw9O8Is48uAIauAnjznbW50= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.9.0/go.mod h1:K5G92gbtCrYJ0mn6zj9Pst7YFsDFuvSYEhYKRMcufnM=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.7.0/go.mod h1:aFXT9Ng2seM9eizF+LfKiyPBGy8xIZKwhusC1gIu3hA= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.9.0 h1:FAF9l8Wjxi9Ad2k/vLTfHZyzXYX72C62wBGpV3G6AIo=
go.opentelemetry.io/otel/metric v0.30.0 h1:Hs8eQZ8aQgs0U49diZoaS6Uaxw3+bBE3lcMUKBFIk3c= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.9.0/go.mod h1:smUdtylgc0YQiUr2PuifS4hBXhAS5xtR6WQhxP1wiNA=
go.opentelemetry.io/otel/metric v0.30.0/go.mod h1:/ShZ7+TS4dHzDFmfi1kSXMhMVubNoP0oIaBp70J6UXU= go.opentelemetry.io/otel/metric v0.31.0 h1:6SiklT+gfWAwWUR0meEMxQBtihpiEs4c+vL9spDTqUs=
go.opentelemetry.io/otel/sdk v1.7.0 h1:4OmStpcKVOfvDOgCt7UriAPtKolwIhxpnSNI/yK+1B0= go.opentelemetry.io/otel/metric v0.31.0/go.mod h1:ohmwj9KTSIeBnDBm/ZwH2PSZxZzoOaG2xZeekTRzL5A=
go.opentelemetry.io/otel/sdk v1.7.0/go.mod h1:uTEOTwaqIVuTGiJN7ii13Ibp75wJmYUDe374q6cZwUU= go.opentelemetry.io/otel/sdk v1.9.0 h1:LNXp1vrr83fNXTHgU8eO89mhzxb/bbWAsHG6fNf3qWo=
go.opentelemetry.io/otel/trace v1.7.0 h1:O37Iogk1lEkMRXewVtZ1BBTVn5JEp8GrJvP92bJqC6o= go.opentelemetry.io/otel/sdk v1.9.0/go.mod h1:AEZc8nt5bd2F7BC24J5R0mrjYnpEgYHyTcM/vrSple4=
go.opentelemetry.io/otel/trace v1.7.0/go.mod h1:fzLSB9nqR2eXzxPXb2JW9IKE+ScyXA48yyE4TNvoHqU= go.opentelemetry.io/otel/trace v1.8.0/go.mod h1:0Bt3PXY8w+3pheS3hQUt+wow8b1ojPaTBoTCh2zIFI4=
go.opentelemetry.io/otel/trace v1.9.0 h1:oZaCNJUjWcg60VXWee8lJKlqhPbXAPB51URuR47pQYc=
go.opentelemetry.io/otel/trace v1.9.0/go.mod h1:2737Q0MuG8q1uILYm2YYVkAyLtOofiTNGg6VODnOiPo=
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U=
go.opentelemetry.io/proto/otlp v0.16.0 h1:WHzDWdXUvbc5bG2ObdrGfaNpQz7ft7QN9HHmJlbiB1E= go.opentelemetry.io/proto/otlp v0.18.0 h1:W5hyXNComRa23tGpKwG+FRAc4rfF6ZUg1JReK+QHS80=
go.opentelemetry.io/proto/otlp v0.16.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= go.opentelemetry.io/proto/otlp v0.18.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U=
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE=
@ -930,10 +938,10 @@ golang.org/x/crypto v0.0.0-20201216223049-8b5274cf687f/go.mod h1:jdWPYTVW3xRLrWP
golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20211202192323-5770296d904e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20211202192323-5770296d904e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.0.0-20220511200225-c6db032c6c88 h1:Tgea0cVUD0ivh5ADBX4WwuI12DUd2to3nCYe2eayMIw= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.0.0-20220511200225-c6db032c6c88/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa h1:zuSxTR4o9y82ebqCUJYNGJbGPo6sKVl54f/TVDObg1c=
golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@ -1033,8 +1041,10 @@ golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su
golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e h1:TsQ7F31D3bUCLeqPT0u+yjp1guoArKaNKmCr22PYgTQ=
golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.0.0-20220802222814-0bcc04d9c69b h1:3ogNYyK4oIQdIKzTu68hQrr4iuVxF3AxKl9Aj/eDrw0=
golang.org/x/net v0.0.0-20220802222814-0bcc04d9c69b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@ -1056,8 +1066,8 @@ golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j
golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE=
golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE=
golang.org/x/oauth2 v0.0.0-20220628200809-02e64fa58f26 h1:uBgVQYJLi/m8M0wzp+aGwBWt90gMRoOVf+aWTW10QHI= golang.org/x/oauth2 v0.0.0-20220722155238-128564f6959c h1:q3gFqPqH7NVofKo3c3yETAP//pPI+G5mvB7qqj1Y5kY=
golang.org/x/oauth2 v0.0.0-20220628200809-02e64fa58f26/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= golang.org/x/oauth2 v0.0.0-20220722155238-128564f6959c/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@ -1070,8 +1080,9 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f h1:Ax0t5p6N38Ga0dThY21weqDEyz2oklo4IvDkpigvkD8=
golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 h1:uVc8UZUe6tr40fFVnUP5Oj+veunVezqYl9z7DYw9xzw=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@ -1109,7 +1120,6 @@ golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@ -1168,8 +1178,10 @@ golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220627191245-f75cf1eec38b h1:2n253B2r0pYSmEV+UNCQoPfU/FiaizQEK5Gu4Bq4JE8= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220627191245-f75cf1eec38b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220731174439-a90be440212d h1:Sv5ogFZatcgIMMtBSTTAgMYsicp25MXBubjXNDKwm80=
golang.org/x/sys v0.0.0-20220731174439-a90be440212d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY=
@ -1258,8 +1270,8 @@ golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.6-0.20210726203631-07bc1bf47fb2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.6-0.20210726203631-07bc1bf47fb2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU=
golang.org/x/tools v0.1.11 h1:loJ25fNOEhSXfHrpoGj91eCUThwdNX6u24rO1xnNteY= golang.org/x/tools v0.1.12 h1:VveCTK38A2rkS8ZqFY25HIDFscX5X9OoEhJd3quQmXU=
golang.org/x/tools v0.1.11/go.mod h1:SgwaegtQh8clINPpECJMqnxLv9I09HLqnW3RMqW0CA4= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@ -1306,8 +1318,8 @@ google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69
google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw= google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw=
google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg= google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg=
google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o= google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o=
google.golang.org/api v0.86.0 h1:ZAnyOHQFIuWso1BodVfSaRyffD74T9ERGFa3k1fNk/U= google.golang.org/api v0.91.0 h1:731+JzuwaJoZXRQGmPoBiV+SrsAfUaIkdMCWTcQNPyA=
google.golang.org/api v0.86.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= google.golang.org/api v0.91.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@ -1393,13 +1405,13 @@ google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX
google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
google.golang.org/genproto v0.0.0-20220519153652-3a47de7e79bd/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
google.golang.org/genproto v0.0.0-20220628213854-d9e0b6570c03 h1:W70HjnmXFJm+8RNjOpIDYW2nKsSi/af0VvIZUtYkwuU= google.golang.org/genproto v0.0.0-20220728213248-dd149ef739b9/go.mod h1:iHe1svFLAZg9VWz891+QbRMwUv9O/1Ww+/mngYeThbc=
google.golang.org/genproto v0.0.0-20220628213854-d9e0b6570c03/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= google.golang.org/genproto v0.0.0-20220802133213-ce4fa296bf78 h1:QntLWYqZeuBtJkth3m/6DLznnI0AHJr+AgJXvVh/izw=
google.golang.org/genproto v0.0.0-20220802133213-ce4fa296bf78/go.mod h1:iHe1svFLAZg9VWz891+QbRMwUv9O/1Ww+/mngYeThbc=
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM=
@ -1435,8 +1447,9 @@ google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ5
google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ=
google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
google.golang.org/grpc v1.47.0 h1:9n77onPX5F3qfFCqjy9dhn8PbNQsIKeVU04J9G7umt8=
google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
google.golang.org/grpc v1.48.0 h1:rQOsyJ/8+ufEDJd/Gdsz7HG220Mh9HAhFHRGnIjda0w=
google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
@ -1451,8 +1464,9 @@ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw=
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w=
google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc= gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@ -1501,12 +1515,12 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
k8s.io/api v0.24.2 h1:g518dPU/L7VRLxWfcadQn2OnsiGWVOadTLpdnqgY2OI= k8s.io/api v0.24.3 h1:tt55QEmKd6L2k5DP6G/ZzdMQKvG5ro4H4teClqm0sTY=
k8s.io/api v0.24.2/go.mod h1:AHqbSkTm6YrQ0ObxjO3Pmp/ubFF/KuM7jU+3khoBsOg= k8s.io/api v0.24.3/go.mod h1:elGR/XSZrS7z7cSZPzVWaycpJuGIw57j9b95/1PdJNI=
k8s.io/apimachinery v0.24.2 h1:5QlH9SL2C8KMcrNJPor+LbXVTaZRReml7svPEh4OKDM= k8s.io/apimachinery v0.24.3 h1:hrFiNSA2cBZqllakVYyH/VyEh4B581bQRmqATJSeQTg=
k8s.io/apimachinery v0.24.2/go.mod h1:82Bi4sCzVBdpYjyI4jY6aHX+YCUchUIrZrXKedjd2UM= k8s.io/apimachinery v0.24.3/go.mod h1:82Bi4sCzVBdpYjyI4jY6aHX+YCUchUIrZrXKedjd2UM=
k8s.io/client-go v0.24.2 h1:CoXFSf8if+bLEbinDqN9ePIDGzcLtqhfd6jpfnwGOFA= k8s.io/client-go v0.24.3 h1:Nl1840+6p4JqkFWEW2LnMKU667BUxw03REfLAVhuKQY=
k8s.io/client-go v0.24.2/go.mod h1:zg4Xaoo+umDsfCWr4fCnmLEtQXyCNXCvJuSsglNcV30= k8s.io/client-go v0.24.3/go.mod h1:AAovolf5Z9bY1wIg2FZ8LPQlEdKHjLI7ZD4rw920BJw=
k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42 h1:Gii5eqf+GmIEwGNKQYQClCayuJCe2/4fZUvF7VG99sU= k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42 h1:Gii5eqf+GmIEwGNKQYQClCayuJCe2/4fZUvF7VG99sU=
k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42/go.mod h1:Z/45zLw8lUo4wdiUkI+v/ImEGAvu3WatcZl3lPMR4Rk= k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42/go.mod h1:Z/45zLw8lUo4wdiUkI+v/ImEGAvu3WatcZl3lPMR4Rk=

View file

@ -14,11 +14,13 @@
package labels package labels
import ( import (
"encoding/json"
"fmt" "fmt"
"strings" "strings"
"testing" "testing"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
yaml "gopkg.in/yaml.v2"
) )
func TestLabels_String(t *testing.T) { func TestLabels_String(t *testing.T) {
@ -503,9 +505,19 @@ func TestLabels_Compare(t *testing.T) {
}, },
} }
sign := func(a int) int {
switch {
case a < 0:
return -1
case a > 0:
return 1
}
return 0
}
for i, test := range tests { for i, test := range tests {
got := Compare(labels, test.compared) got := Compare(labels, test.compared)
require.Equal(t, test.expected, got, "unexpected comparison result for test case %d", i) require.Equal(t, sign(test.expected), sign(got), "unexpected comparison result for test case %d", i)
} }
} }
@ -561,19 +573,19 @@ func TestLabels_Get(t *testing.T) {
// Labels_Get/with_30_labels/get_last_label 169ns ± 0% 29ns ± 0% ~ (p=1.000 n=1+1) // Labels_Get/with_30_labels/get_last_label 169ns ± 0% 29ns ± 0% ~ (p=1.000 n=1+1)
func BenchmarkLabels_Get(b *testing.B) { func BenchmarkLabels_Get(b *testing.B) {
maxLabels := 30 maxLabels := 30
allLabels := make(Labels, maxLabels) allLabels := make([]Label, maxLabels)
for i := 0; i < maxLabels; i++ { for i := 0; i < maxLabels; i++ {
allLabels[i] = Label{Name: strings.Repeat(string('a'+byte(i)), 5)} allLabels[i] = Label{Name: strings.Repeat(string('a'+byte(i)), 5)}
} }
for _, size := range []int{5, 10, maxLabels} { for _, size := range []int{5, 10, maxLabels} {
b.Run(fmt.Sprintf("with %d labels", size), func(b *testing.B) { b.Run(fmt.Sprintf("with %d labels", size), func(b *testing.B) {
labels := allLabels[:size] labels := New(allLabels[:size]...)
for _, scenario := range []struct { for _, scenario := range []struct {
desc, label string desc, label string
}{ }{
{"get first label", labels[0].Name}, {"get first label", allLabels[0].Name},
{"get middle label", labels[size/2].Name}, {"get middle label", allLabels[size/2].Name},
{"get last label", labels[size-1].Name}, {"get last label", allLabels[size-1].Name},
} { } {
b.Run(scenario.desc, func(b *testing.B) { b.Run(scenario.desc, func(b *testing.B) {
b.ResetTimer() b.ResetTimer()
@ -631,6 +643,7 @@ func TestLabels_BytesWithLabels(t *testing.T) {
func TestLabels_BytesWithoutLabels(t *testing.T) { func TestLabels_BytesWithoutLabels(t *testing.T) {
require.Equal(t, Labels{{"aaa", "111"}}.Bytes(nil), Labels{{"aaa", "111"}, {"bbb", "222"}, {"ccc", "333"}}.BytesWithoutLabels(nil, "bbb", "ccc")) require.Equal(t, Labels{{"aaa", "111"}}.Bytes(nil), Labels{{"aaa", "111"}, {"bbb", "222"}, {"ccc", "333"}}.BytesWithoutLabels(nil, "bbb", "ccc"))
require.Equal(t, Labels{{MetricName, "333"}, {"aaa", "111"}}.Bytes(nil), Labels{{MetricName, "333"}, {"aaa", "111"}, {"bbb", "222"}}.BytesWithoutLabels(nil, "bbb"))
require.Equal(t, Labels{{"aaa", "111"}}.Bytes(nil), Labels{{MetricName, "333"}, {"aaa", "111"}, {"bbb", "222"}}.BytesWithoutLabels(nil, MetricName, "bbb")) require.Equal(t, Labels{{"aaa", "111"}}.Bytes(nil), Labels{{MetricName, "333"}, {"aaa", "111"}, {"bbb", "222"}}.BytesWithoutLabels(nil, MetricName, "bbb"))
} }
@ -720,7 +733,6 @@ func TestLabels_Hash(t *testing.T) {
{Name: "baz", Value: "qux"}, {Name: "baz", Value: "qux"},
} }
require.Equal(t, lbls.Hash(), lbls.Hash()) require.Equal(t, lbls.Hash(), lbls.Hash())
require.NotEqual(t, lbls.Hash(), Labels{lbls[1], lbls[0]}.Hash(), "unordered labels match.")
require.NotEqual(t, lbls.Hash(), Labels{lbls[0]}.Hash(), "different labels match.") require.NotEqual(t, lbls.Hash(), Labels{lbls[0]}.Hash(), "different labels match.")
} }
@ -778,3 +790,52 @@ func BenchmarkLabels_Hash(b *testing.B) {
}) })
} }
} }
func TestMarshaling(t *testing.T) {
lbls := FromStrings("aaa", "111", "bbb", "2222", "ccc", "33333")
expectedJSON := "{\"aaa\":\"111\",\"bbb\":\"2222\",\"ccc\":\"33333\"}"
b, err := json.Marshal(lbls)
require.NoError(t, err)
require.Equal(t, expectedJSON, string(b))
var gotJ Labels
err = json.Unmarshal(b, &gotJ)
require.NoError(t, err)
require.Equal(t, lbls, gotJ)
expectedYAML := "aaa: \"111\"\nbbb: \"2222\"\nccc: \"33333\"\n"
b, err = yaml.Marshal(lbls)
require.NoError(t, err)
require.Equal(t, expectedYAML, string(b))
var gotY Labels
err = yaml.Unmarshal(b, &gotY)
require.NoError(t, err)
require.Equal(t, lbls, gotY)
// Now in a struct with a tag
type foo struct {
ALabels Labels `json:"a_labels,omitempty" yaml:"a_labels,omitempty"`
}
f := foo{ALabels: lbls}
b, err = json.Marshal(f)
require.NoError(t, err)
expectedJSONFromStruct := "{\"a_labels\":" + expectedJSON + "}"
require.Equal(t, expectedJSONFromStruct, string(b))
var gotFJ foo
err = json.Unmarshal(b, &gotFJ)
require.NoError(t, err)
require.Equal(t, f, gotFJ)
b, err = yaml.Marshal(f)
require.NoError(t, err)
expectedYAMLFromStruct := "a_labels:\n aaa: \"111\"\n bbb: \"2222\"\n ccc: \"33333\"\n"
require.Equal(t, expectedYAMLFromStruct, string(b))
var gotFY foo
err = yaml.Unmarshal(b, &gotFY)
require.NoError(t, err)
require.Equal(t, f, gotFY)
}
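The test above pins down the new map-style JSON and YAML encoding of Labels. A minimal standalone sketch of an encoder/decoder along those lines, assuming Labels is a sorted slice of name/value pairs (the actual implementation in model/labels may differ in detail):

package main

import (
	"encoding/json"
	"fmt"
	"sort"
)

// Label and Labels mirror the model/labels types for this sketch.
type Label struct{ Name, Value string }
type Labels []Label

// MarshalJSON encodes the label set as a flat JSON object.
func (ls Labels) MarshalJSON() ([]byte, error) {
	m := make(map[string]string, len(ls))
	for _, l := range ls {
		m[l.Name] = l.Value
	}
	return json.Marshal(m)
}

// UnmarshalJSON decodes a JSON object back into a sorted label set.
func (ls *Labels) UnmarshalJSON(b []byte) error {
	var m map[string]string
	if err := json.Unmarshal(b, &m); err != nil {
		return err
	}
	*ls = (*ls)[:0]
	for n, v := range m {
		*ls = append(*ls, Label{Name: n, Value: v})
	}
	sort.Slice(*ls, func(i, j int) bool { return (*ls)[i].Name < (*ls)[j].Name })
	return nil
}

func main() {
	b, _ := json.Marshal(Labels{{"aaa", "111"}, {"bbb", "2222"}})
	fmt.Println(string(b)) // {"aaa":"111","bbb":"2222"}
}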

View file

@ -0,0 +1,23 @@
// Copyright 2022 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package metadata
import "github.com/prometheus/prometheus/model/textparse"
// Metadata stores a series' metadata.
type Metadata struct {
Type textparse.MetricType
Unit string
Help string
}
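A small usage sketch for the new package, assuming the textparse.MetricTypeCounter constant from model/textparse (values here are illustrative only):

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/metadata"
	"github.com/prometheus/prometheus/model/textparse"
)

func main() {
	// Metadata travels alongside a series rather than inside its labels.
	m := metadata.Metadata{
		Type: textparse.MetricTypeCounter,
		Unit: "seconds",
		Help: "Total time spent handling requests.",
	}
	fmt.Printf("%s (%s): %s\n", m.Type, m.Unit, m.Help)
}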

View file

@ -130,6 +130,8 @@ type Query interface {
type QueryOpts struct { type QueryOpts struct {
// Enables recording per-step statistics if the engine has it enabled as well. Disabled by default. // Enables recording per-step statistics if the engine has it enabled as well. Disabled by default.
EnablePerStepStats bool EnablePerStepStats bool
// Lookback delta duration for this query.
LookbackDelta time.Duration
} }
// query implements the Query interface. // query implements the Query interface.
@ -438,11 +440,17 @@ func (ng *Engine) newQuery(q storage.Queryable, opts *QueryOpts, expr parser.Exp
opts = &QueryOpts{} opts = &QueryOpts{}
} }
lookbackDelta := opts.LookbackDelta
if lookbackDelta <= 0 {
lookbackDelta = ng.lookbackDelta
}
es := &parser.EvalStmt{ es := &parser.EvalStmt{
Expr: PreprocessExpr(expr, start, end), Expr: PreprocessExpr(expr, start, end),
Start: start, Start: start,
End: end, End: end,
Interval: interval, Interval: interval,
LookbackDelta: lookbackDelta,
} }
qry := &query{ qry := &query{
stmt: es, stmt: es,
@ -637,7 +645,7 @@ func (ng *Engine) execEvalStmt(ctx context.Context, query *query, s *parser.Eval
ctx: ctxInnerEval, ctx: ctxInnerEval,
maxSamples: ng.maxSamplesPerQuery, maxSamples: ng.maxSamplesPerQuery,
logger: ng.logger, logger: ng.logger,
lookbackDelta: ng.lookbackDelta, lookbackDelta: s.LookbackDelta,
samplesStats: query.sampleStats, samplesStats: query.sampleStats,
noStepSubqueryIntervalFn: ng.noStepSubqueryIntervalFn, noStepSubqueryIntervalFn: ng.noStepSubqueryIntervalFn,
} }
@ -689,7 +697,7 @@ func (ng *Engine) execEvalStmt(ctx context.Context, query *query, s *parser.Eval
ctx: ctxInnerEval, ctx: ctxInnerEval,
maxSamples: ng.maxSamplesPerQuery, maxSamples: ng.maxSamplesPerQuery,
logger: ng.logger, logger: ng.logger,
lookbackDelta: ng.lookbackDelta, lookbackDelta: s.LookbackDelta,
samplesStats: query.sampleStats, samplesStats: query.sampleStats,
noStepSubqueryIntervalFn: ng.noStepSubqueryIntervalFn, noStepSubqueryIntervalFn: ng.noStepSubqueryIntervalFn,
} }
@ -802,7 +810,7 @@ func (ng *Engine) getTimeRangesForSelector(s *parser.EvalStmt, n *parser.VectorS
} }
if evalRange == 0 { if evalRange == 0 {
start = start - durationMilliseconds(ng.lookbackDelta) start = start - durationMilliseconds(s.LookbackDelta)
} else { } else {
// For all matrix queries we want to ensure that we have (end-start) + range selected // For all matrix queries we want to ensure that we have (end-start) + range selected
// this way we have `range` data before the start time // this way we have `range` data before the start time
@ -938,7 +946,7 @@ func (ev *evaluator) error(err error) {
} }
// recover is the handler that turns panics into returns from the top level of evaluation. // recover is the handler that turns panics into returns from the top level of evaluation.
func (ev *evaluator) recover(ws *storage.Warnings, errp *error) { func (ev *evaluator) recover(expr parser.Expr, ws *storage.Warnings, errp *error) {
e := recover() e := recover()
if e == nil { if e == nil {
return return
@ -950,7 +958,7 @@ func (ev *evaluator) recover(ws *storage.Warnings, errp *error) {
buf := make([]byte, 64<<10) buf := make([]byte, 64<<10)
buf = buf[:runtime.Stack(buf, false)] buf = buf[:runtime.Stack(buf, false)]
level.Error(ev.logger).Log("msg", "runtime panic in parser", "err", e, "stacktrace", string(buf)) level.Error(ev.logger).Log("msg", "runtime panic in parser", "expr", expr.String(), "err", e, "stacktrace", string(buf))
*errp = fmt.Errorf("unexpected error: %w", err) *errp = fmt.Errorf("unexpected error: %w", err)
case errWithWarnings: case errWithWarnings:
*errp = err.err *errp = err.err
@ -963,7 +971,7 @@ func (ev *evaluator) recover(ws *storage.Warnings, errp *error) {
} }
func (ev *evaluator) Eval(expr parser.Expr) (v parser.Value, ws storage.Warnings, err error) { func (ev *evaluator) Eval(expr parser.Expr) (v parser.Value, ws storage.Warnings, err error) {
defer ev.recover(&ws, &err) defer ev.recover(expr, &ws, &err)
v, ws = ev.eval(expr) v, ws = ev.eval(expr)
return v, ws, nil return v, ws, nil

View file

@ -1644,17 +1644,27 @@ load 1ms
} }
func TestRecoverEvaluatorRuntime(t *testing.T) { func TestRecoverEvaluatorRuntime(t *testing.T) {
ev := &evaluator{logger: log.NewNopLogger()} var output []interface{}
logger := log.Logger(log.LoggerFunc(func(keyvals ...interface{}) error {
output = append(output, keyvals...)
return nil
}))
ev := &evaluator{logger: logger}
expr, _ := parser.ParseExpr("sum(up)")
var err error var err error
defer ev.recover(nil, &err)
defer func() {
require.EqualError(t, err, "unexpected error: runtime error: index out of range [123] with length 0")
require.Contains(t, output, "sum(up)")
}()
defer ev.recover(expr, nil, &err)
// Cause a runtime panic. // Cause a runtime panic.
var a []int var a []int
//nolint:govet //nolint:govet
a[123] = 1 a[123] = 1
require.EqualError(t, err, "unexpected error")
} }
func TestRecoverEvaluatorError(t *testing.T) { func TestRecoverEvaluatorError(t *testing.T) {
@ -1666,7 +1676,7 @@ func TestRecoverEvaluatorError(t *testing.T) {
defer func() { defer func() {
require.EqualError(t, err, e.Error()) require.EqualError(t, err, e.Error())
}() }()
defer ev.recover(nil, &err) defer ev.recover(nil, nil, &err)
panic(e) panic(e)
} }
@ -1686,7 +1696,7 @@ func TestRecoverEvaluatorErrorWithWarnings(t *testing.T) {
require.EqualError(t, err, e.Error()) require.EqualError(t, err, e.Error())
require.Equal(t, warnings, ws, "wrong warning message") require.Equal(t, warnings, ws, "wrong warning message")
}() }()
defer ev.recover(&ws, &err) defer ev.recover(nil, &ws, &err)
panic(e) panic(e)
} }
@ -4019,3 +4029,96 @@ func TestSparseHistogram_Sum_AddOperator(t *testing.T) {
}) })
} }
} }
func TestQueryLookbackDelta(t *testing.T) {
var (
load = `load 5m
metric 0 1 2
`
query = "metric"
lastDatapointTs = time.Unix(600, 0)
)
cases := []struct {
name string
ts time.Time
engineLookback, queryLookback time.Duration
expectSamples bool
}{
{
name: "default lookback delta",
ts: lastDatapointTs.Add(defaultLookbackDelta),
expectSamples: true,
},
{
name: "outside default lookback delta",
ts: lastDatapointTs.Add(defaultLookbackDelta + time.Millisecond),
expectSamples: false,
},
{
name: "custom engine lookback delta",
ts: lastDatapointTs.Add(10 * time.Minute),
engineLookback: 10 * time.Minute,
expectSamples: true,
},
{
name: "outside custom engine lookback delta",
ts: lastDatapointTs.Add(10*time.Minute + time.Millisecond),
engineLookback: 10 * time.Minute,
expectSamples: false,
},
{
name: "custom query lookback delta",
ts: lastDatapointTs.Add(20 * time.Minute),
engineLookback: 10 * time.Minute,
queryLookback: 20 * time.Minute,
expectSamples: true,
},
{
name: "outside custom query lookback delta",
ts: lastDatapointTs.Add(20*time.Minute + time.Millisecond),
engineLookback: 10 * time.Minute,
queryLookback: 20 * time.Minute,
expectSamples: false,
},
{
name: "negative custom query lookback delta",
ts: lastDatapointTs.Add(20 * time.Minute),
engineLookback: -10 * time.Minute,
queryLookback: 20 * time.Minute,
expectSamples: true,
},
}
for _, c := range cases {
c := c
t.Run(c.name, func(t *testing.T) {
test, err := NewTest(t, load)
require.NoError(t, err)
defer test.Close()
err = test.Run()
require.NoError(t, err)
eng := test.QueryEngine()
if c.engineLookback != 0 {
eng.lookbackDelta = c.engineLookback
}
opts := &QueryOpts{
LookbackDelta: c.queryLookback,
}
qry, err := eng.NewInstantQuery(test.Queryable(), opts, query, c.ts)
require.NoError(t, err)
res := qry.Exec(test.Context())
require.NoError(t, res.Err)
vec, ok := res.Value.(Vector)
require.True(t, ok)
if c.expectSamples {
require.NotEmpty(t, vec)
} else {
require.Empty(t, vec)
}
})
}
}

View file

@ -68,6 +68,8 @@ type EvalStmt struct {
Start, End time.Time Start, End time.Time
// Time between two evaluated instants for the range [Start:End]. // Time between two evaluated instants for the range [Start:End].
Interval time.Duration Interval time.Duration
// Lookback delta to use for this evaluation.
LookbackDelta time.Duration
} }
func (*EvalStmt) PromQLStmt() {} func (*EvalStmt) PromQLStmt() {}

View file

@ -37,25 +37,25 @@ func TestAggregateExprPretty(t *testing.T) {
}, },
{ {
in: `sum without(job,foo) (task:errors:rate10s{job="s"})`, in: `sum without(job,foo) (task:errors:rate10s{job="s"})`,
out: `sum without(job, foo) ( out: `sum without (job, foo) (
task:errors:rate10s{job="s"} task:errors:rate10s{job="s"}
)`, )`,
}, },
{ {
in: `sum(task:errors:rate10s{job="s"}) without(job,foo)`, in: `sum(task:errors:rate10s{job="s"}) without(job,foo)`,
out: `sum without(job, foo) ( out: `sum without (job, foo) (
task:errors:rate10s{job="s"} task:errors:rate10s{job="s"}
)`, )`,
}, },
{ {
in: `sum by(job,foo) (task:errors:rate10s{job="s"})`, in: `sum by(job,foo) (task:errors:rate10s{job="s"})`,
out: `sum by(job, foo) ( out: `sum by (job, foo) (
task:errors:rate10s{job="s"} task:errors:rate10s{job="s"}
)`, )`,
}, },
{ {
in: `sum (task:errors:rate10s{job="s"}) by(job,foo)`, in: `sum (task:errors:rate10s{job="s"}) by(job,foo)`,
out: `sum by(job, foo) ( out: `sum by (job, foo) (
task:errors:rate10s{job="s"} task:errors:rate10s{job="s"}
)`, )`,
}, },
@ -68,17 +68,17 @@ func TestAggregateExprPretty(t *testing.T) {
}, },
{ {
in: `sum by(job,foo) (sum by(job,foo) (task:errors:rate10s{job="s"}))`, in: `sum by(job,foo) (sum by(job,foo) (task:errors:rate10s{job="s"}))`,
out: `sum by(job, foo) ( out: `sum by (job, foo) (
sum by(job, foo) ( sum by (job, foo) (
task:errors:rate10s{job="s"} task:errors:rate10s{job="s"}
) )
)`, )`,
}, },
{ {
in: `sum by(job,foo) (sum by(job,foo) (sum by(job,foo) (task:errors:rate10s{job="s"})))`, in: `sum by(job,foo) (sum by(job,foo) (sum by(job,foo) (task:errors:rate10s{job="s"})))`,
out: `sum by(job, foo) ( out: `sum by (job, foo) (
sum by(job, foo) ( sum by (job, foo) (
sum by(job, foo) ( sum by (job, foo) (
task:errors:rate10s{job="s"} task:errors:rate10s{job="s"}
) )
) )
@ -87,8 +87,8 @@ func TestAggregateExprPretty(t *testing.T) {
{ {
in: `sum by(job,foo) in: `sum by(job,foo)
(sum by(job,foo) (task:errors:rate10s{job="s"}))`, (sum by(job,foo) (task:errors:rate10s{job="s"}))`,
out: `sum by(job, foo) ( out: `sum by (job, foo) (
sum by(job, foo) ( sum by (job, foo) (
task:errors:rate10s{job="s"} task:errors:rate10s{job="s"}
) )
)`, )`,
@ -96,8 +96,8 @@ func TestAggregateExprPretty(t *testing.T) {
{ {
in: `sum by(job,foo) in: `sum by(job,foo)
(sum(task:errors:rate10s{job="s"}) without(job,foo))`, (sum(task:errors:rate10s{job="s"}) without(job,foo))`,
out: `sum by(job, foo) ( out: `sum by (job, foo) (
sum without(job, foo) ( sum without (job, foo) (
task:errors:rate10s{job="s"} task:errors:rate10s{job="s"}
) )
)`, )`,
@ -106,8 +106,8 @@ func TestAggregateExprPretty(t *testing.T) {
in: `sum by(job,foo) # Comment 1. in: `sum by(job,foo) # Comment 1.
(sum by(job,foo) ( # Comment 2. (sum by(job,foo) ( # Comment 2.
task:errors:rate10s{job="s"}))`, task:errors:rate10s{job="s"}))`,
out: `sum by(job, foo) ( out: `sum by (job, foo) (
sum by(job, foo) ( sum by (job, foo) (
task:errors:rate10s{job="s"} task:errors:rate10s{job="s"}
) )
)`, )`,
@ -139,7 +139,7 @@ func TestBinaryExprPretty(t *testing.T) {
{ {
in: `a + ignoring(job) b`, in: `a + ignoring(job) b`,
out: ` a out: ` a
+ ignoring(job) + ignoring (job)
b`, b`,
}, },
{ {
@ -175,19 +175,21 @@ func TestBinaryExprPretty(t *testing.T) {
{ {
in: `foo_1 + ignoring(foo) foo_2 + ignoring(job) group_left foo_3 + on(instance) group_right foo_4`, in: `foo_1 + ignoring(foo) foo_2 + ignoring(job) group_left foo_3 + on(instance) group_right foo_4`,
out: ` foo_1 out: ` foo_1
+ ignoring(foo) + ignoring (foo)
foo_2 foo_2
+ ignoring(job) group_left() + ignoring (job) group_left ()
foo_3 foo_3
+ on(instance) group_right() + on (instance) group_right ()
foo_4`, foo_4`,
}, },
} }
for _, test := range inputs { for _, test := range inputs {
expr, err := ParseExpr(test.in) t.Run(test.in, func(t *testing.T) {
require.NoError(t, err) expr, err := ParseExpr(test.in)
require.NoError(t, err)
require.Equal(t, test.out, Prettify(expr)) require.Equal(t, test.out, Prettify(expr))
})
} }
} }
@ -560,7 +562,7 @@ or
}, },
{ {
in: `min by (job, integration) (rate(alertmanager_notifications_failed_total{job="alertmanager", integration=~".*"}[5m]) / rate(alertmanager_notifications_total{job="alertmanager", integration="~.*"}[5m])) > 0.01`, in: `min by (job, integration) (rate(alertmanager_notifications_failed_total{job="alertmanager", integration=~".*"}[5m]) / rate(alertmanager_notifications_total{job="alertmanager", integration="~.*"}[5m])) > 0.01`,
out: ` min by(job, integration) ( out: ` min by (job, integration) (
rate( rate(
alertmanager_notifications_failed_total{integration=~".*",job="alertmanager"}[5m] alertmanager_notifications_failed_total{integration=~".*",job="alertmanager"}[5m]
) )
@ -575,7 +577,7 @@ or
{ {
in: `(count by (job) (changes(process_start_time_seconds{job="alertmanager"}[10m]) > 4) / count by (job) (up{job="alertmanager"})) >= 0.5`, in: `(count by (job) (changes(process_start_time_seconds{job="alertmanager"}[10m]) > 4) / count by (job) (up{job="alertmanager"})) >= 0.5`,
out: ` ( out: ` (
count by(job) ( count by (job) (
changes( changes(
process_start_time_seconds{job="alertmanager"}[10m] process_start_time_seconds{job="alertmanager"}[10m]
) )
@ -583,7 +585,7 @@ or
4 4
) )
/ /
count by(job) ( count by (job) (
up{job="alertmanager"} up{job="alertmanager"}
) )
) )
@ -630,7 +632,7 @@ func TestUnaryPretty(t *testing.T) {
in: `-histogram_quantile(0.99, sum by (le) (rate(foo[1m])))`, in: `-histogram_quantile(0.99, sum by (le) (rate(foo[1m])))`,
out: `-histogram_quantile( out: `-histogram_quantile(
0.99, 0.99,
sum by(le) ( sum by (le) (
rate( rate(
foo[1m] foo[1m]
) )
@ -659,8 +661,10 @@ func TestUnaryPretty(t *testing.T) {
}, },
} }
for _, test := range inputs { for _, test := range inputs {
expr, err := ParseExpr(test.in) t.Run(test.in, func(t *testing.T) {
require.NoError(t, err) expr, err := ParseExpr(test.in)
require.Equal(t, test.out, Prettify(expr)) require.NoError(t, err)
require.Equal(t, test.out, Prettify(expr))
})
} }
} }

View file

@ -77,9 +77,9 @@ func (node *AggregateExpr) getAggOpStr() string {
switch { switch {
case node.Without: case node.Without:
aggrString += fmt.Sprintf(" without(%s) ", strings.Join(node.Grouping, ", ")) aggrString += fmt.Sprintf(" without (%s) ", strings.Join(node.Grouping, ", "))
case len(node.Grouping) > 0: case len(node.Grouping) > 0:
aggrString += fmt.Sprintf(" by(%s) ", strings.Join(node.Grouping, ", ")) aggrString += fmt.Sprintf(" by (%s) ", strings.Join(node.Grouping, ", "))
} }
return aggrString return aggrString
@ -103,14 +103,14 @@ func (node *BinaryExpr) getMatchingStr() string {
if vm.On { if vm.On {
vmTag = "on" vmTag = "on"
} }
matching = fmt.Sprintf(" %s(%s)", vmTag, strings.Join(vm.MatchingLabels, ", ")) matching = fmt.Sprintf(" %s (%s)", vmTag, strings.Join(vm.MatchingLabels, ", "))
if vm.Card == CardManyToOne || vm.Card == CardOneToMany { if vm.Card == CardManyToOne || vm.Card == CardOneToMany {
vmCard := "right" vmCard := "right"
if vm.Card == CardManyToOne { if vm.Card == CardManyToOne {
vmCard = "left" vmCard = "left"
} }
matching += fmt.Sprintf(" group_%s(%s)", vmCard, strings.Join(vm.Include, ", ")) matching += fmt.Sprintf(" group_%s (%s)", vmCard, strings.Join(vm.Include, ", "))
} }
} }
return matching return matching
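The net effect of the two Sprintf changes: grouping modifiers (by, without, on, ignoring, group_left, group_right) now print with a space before the opening parenthesis, in both String() and Prettify() output. A quick sketch of the before/after, assuming the promql/parser package:

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/promql/parser"
)

func main() {
	expr, err := parser.ParseExpr(`sum by(job, foo) (task:errors:rate10s{job="s"})`)
	if err != nil {
		panic(err)
	}
	// Previously: sum by(job, foo) (task:errors:rate10s{job="s"})
	// Now:        sum by (job, foo) (task:errors:rate10s{job="s"})
	fmt.Println(expr.String())
}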

View file

@ -33,13 +33,16 @@ func TestExprString(t *testing.T) {
out: `sum(task:errors:rate10s{job="s"})`, out: `sum(task:errors:rate10s{job="s"})`,
}, },
{ {
in: `sum by(code) (task:errors:rate10s{job="s"})`, in: `sum by(code) (task:errors:rate10s{job="s"})`,
out: `sum by (code) (task:errors:rate10s{job="s"})`,
}, },
{ {
in: `sum without() (task:errors:rate10s{job="s"})`, in: `sum without() (task:errors:rate10s{job="s"})`,
out: `sum without () (task:errors:rate10s{job="s"})`,
}, },
{ {
in: `sum without(instance) (task:errors:rate10s{job="s"})`, in: `sum without(instance) (task:errors:rate10s{job="s"})`,
out: `sum without (instance) (task:errors:rate10s{job="s"})`,
}, },
{ {
in: `topk(5, task:errors:rate10s{job="s"})`, in: `topk(5, task:errors:rate10s{job="s"})`,
@ -48,26 +51,32 @@ func TestExprString(t *testing.T) {
in: `count_values("value", task:errors:rate10s{job="s"})`, in: `count_values("value", task:errors:rate10s{job="s"})`,
}, },
{ {
in: `a - on() c`, in: `a - on() c`,
out: `a - on () c`,
}, },
{ {
in: `a - on(b) c`, in: `a - on(b) c`,
out: `a - on (b) c`,
}, },
{ {
in: `a - on(b) group_left(x) c`, in: `a - on(b) group_left(x) c`,
out: `a - on (b) group_left (x) c`,
}, },
{ {
in: `a - on(b) group_left(x, y) c`, in: `a - on(b) group_left(x, y) c`,
out: `a - on (b) group_left (x, y) c`,
}, },
{ {
in: `a - on(b) group_left c`, in: `a - on(b) group_left c`,
out: `a - on(b) group_left() c`, out: `a - on (b) group_left () c`,
}, },
{ {
in: `a - on(b) group_left() (c)`, in: `a - on(b) group_left() (c)`,
out: `a - on (b) group_left () (c)`,
}, },
{ {
in: `a - ignoring(b) c`, in: `a - ignoring(b) c`,
out: `a - ignoring (b) c`,
}, },
{ {
in: `a - ignoring() c`, in: `a - ignoring() c`,

View file

@ -24,6 +24,7 @@ import (
"github.com/go-kit/log" "github.com/go-kit/log"
"github.com/go-kit/log/level" "github.com/go-kit/log/level"
"github.com/prometheus/common/model" "github.com/prometheus/common/model"
"go.uber.org/atomic"
yaml "gopkg.in/yaml.v2" yaml "gopkg.in/yaml.v2"
"github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/labels"
@ -121,17 +122,17 @@ type AlertingRule struct {
externalURL string externalURL string
// true if old state has been restored. We start persisting samples for ALERT_FOR_STATE // true if old state has been restored. We start persisting samples for ALERT_FOR_STATE
// only after the restoration. // only after the restoration.
restored bool restored *atomic.Bool
// Protects the below.
mtx sync.Mutex
// Time in seconds taken to evaluate rule. // Time in seconds taken to evaluate rule.
evaluationDuration time.Duration evaluationDuration *atomic.Duration
// Timestamp of last evaluation of rule. // Timestamp of last evaluation of rule.
evaluationTimestamp time.Time evaluationTimestamp *atomic.Time
// The health of the alerting rule. // The health of the alerting rule.
health RuleHealth health *atomic.String
// The last error seen by the alerting rule. // The last error seen by the alerting rule.
lastError error lastError *atomic.Error
// activeMtx Protects the `active` map.
activeMtx sync.Mutex
// A map of alerts which are currently active (Pending or Firing), keyed by // A map of alerts which are currently active (Pending or Firing), keyed by
// the fingerprint of the labelset they correspond to. // the fingerprint of the labelset they correspond to.
active map[uint64]*Alert active map[uint64]*Alert
@ -151,17 +152,20 @@ func NewAlertingRule(
} }
return &AlertingRule{ return &AlertingRule{
name: name, name: name,
vector: vec, vector: vec,
holdDuration: hold, holdDuration: hold,
labels: labels, labels: labels,
annotations: annotations, annotations: annotations,
externalLabels: el, externalLabels: el,
externalURL: externalURL, externalURL: externalURL,
health: HealthUnknown, active: map[uint64]*Alert{},
active: map[uint64]*Alert{}, logger: logger,
logger: logger, restored: atomic.NewBool(restored),
restored: restored, health: atomic.NewString(string(HealthUnknown)),
evaluationTimestamp: atomic.NewTime(time.Time{}),
evaluationDuration: atomic.NewDuration(0),
lastError: atomic.NewError(nil),
} }
} }
@ -172,30 +176,22 @@ func (r *AlertingRule) Name() string {
// SetLastError sets the current error seen by the alerting rule. // SetLastError sets the current error seen by the alerting rule.
func (r *AlertingRule) SetLastError(err error) { func (r *AlertingRule) SetLastError(err error) {
r.mtx.Lock() r.lastError.Store(err)
defer r.mtx.Unlock()
r.lastError = err
} }
// LastError returns the last error seen by the alerting rule. // LastError returns the last error seen by the alerting rule.
func (r *AlertingRule) LastError() error { func (r *AlertingRule) LastError() error {
r.mtx.Lock() return r.lastError.Load()
defer r.mtx.Unlock()
return r.lastError
} }
// SetHealth sets the current health of the alerting rule. // SetHealth sets the current health of the alerting rule.
func (r *AlertingRule) SetHealth(health RuleHealth) { func (r *AlertingRule) SetHealth(health RuleHealth) {
r.mtx.Lock() r.health.Store(string(health))
defer r.mtx.Unlock()
r.health = health
} }
// Health returns the current health of the alerting rule. // Health returns the current health of the alerting rule.
func (r *AlertingRule) Health() RuleHealth { func (r *AlertingRule) Health() RuleHealth {
r.mtx.Lock() return RuleHealth(r.health.String())
defer r.mtx.Unlock()
return r.health
} }
// Query returns the query expression of the alerting rule. // Query returns the query expression of the alerting rule.
@ -283,42 +279,32 @@ func (r *AlertingRule) QueryforStateSeries(alert *Alert, q storage.Querier) (sto
// SetEvaluationDuration updates evaluationDuration to the duration it took to evaluate the rule on its last evaluation. // SetEvaluationDuration updates evaluationDuration to the duration it took to evaluate the rule on its last evaluation.
func (r *AlertingRule) SetEvaluationDuration(dur time.Duration) { func (r *AlertingRule) SetEvaluationDuration(dur time.Duration) {
r.mtx.Lock() r.evaluationDuration.Store(dur)
defer r.mtx.Unlock()
r.evaluationDuration = dur
} }
// GetEvaluationDuration returns the time in seconds it took to evaluate the alerting rule. // GetEvaluationDuration returns the time in seconds it took to evaluate the alerting rule.
func (r *AlertingRule) GetEvaluationDuration() time.Duration { func (r *AlertingRule) GetEvaluationDuration() time.Duration {
r.mtx.Lock() return r.evaluationDuration.Load()
defer r.mtx.Unlock()
return r.evaluationDuration
} }
// SetEvaluationTimestamp updates evaluationTimestamp to the timestamp of when the rule was last evaluated. // SetEvaluationTimestamp updates evaluationTimestamp to the timestamp of when the rule was last evaluated.
func (r *AlertingRule) SetEvaluationTimestamp(ts time.Time) { func (r *AlertingRule) SetEvaluationTimestamp(ts time.Time) {
r.mtx.Lock() r.evaluationTimestamp.Store(ts)
defer r.mtx.Unlock()
r.evaluationTimestamp = ts
} }
// GetEvaluationTimestamp returns the time the evaluation took place. // GetEvaluationTimestamp returns the time the evaluation took place.
func (r *AlertingRule) GetEvaluationTimestamp() time.Time { func (r *AlertingRule) GetEvaluationTimestamp() time.Time {
r.mtx.Lock() return r.evaluationTimestamp.Load()
defer r.mtx.Unlock()
return r.evaluationTimestamp
} }
// SetRestored updates the restoration state of the alerting rule. // SetRestored updates the restoration state of the alerting rule.
func (r *AlertingRule) SetRestored(restored bool) { func (r *AlertingRule) SetRestored(restored bool) {
r.restored = restored r.restored.Store(restored)
} }
// Restored returns the restoration state of the alerting rule. // Restored returns the restoration state of the alerting rule.
func (r *AlertingRule) Restored() bool { func (r *AlertingRule) Restored() bool {
r.mtx.Lock() return r.restored.Load()
defer r.mtx.Unlock()
return r.restored
} }
// resolvedRetention is the duration for which a resolved alert instance // resolvedRetention is the duration for which a resolved alert instance
@ -333,9 +319,6 @@ func (r *AlertingRule) Eval(ctx context.Context, ts time.Time, query QueryFunc,
return nil, err return nil, err
} }
r.mtx.Lock()
defer r.mtx.Unlock()
// Create pending alerts for any new vector elements in the alert expression // Create pending alerts for any new vector elements in the alert expression
// or update the expression value for existing elements. // or update the expression value for existing elements.
resultFPs := map[uint64]struct{}{} resultFPs := map[uint64]struct{}{}
@ -407,6 +390,9 @@ func (r *AlertingRule) Eval(ctx context.Context, ts time.Time, query QueryFunc,
} }
} }
r.activeMtx.Lock()
defer r.activeMtx.Unlock()
for h, a := range alerts { for h, a := range alerts {
// Check whether we already have alerting state for the identifying label set. // Check whether we already have alerting state for the identifying label set.
// Update the last value and annotations if so, create a new alert entry otherwise. // Update the last value and annotations if so, create a new alert entry otherwise.
@ -441,7 +427,7 @@ func (r *AlertingRule) Eval(ctx context.Context, ts time.Time, query QueryFunc,
a.FiredAt = ts a.FiredAt = ts
} }
if r.restored { if r.restored.Load() {
vec = append(vec, r.sample(a, ts)) vec = append(vec, r.sample(a, ts))
vec = append(vec, r.forStateSample(a, ts, float64(a.ActiveAt.Unix()))) vec = append(vec, r.forStateSample(a, ts, float64(a.ActiveAt.Unix())))
} }
@ -458,8 +444,8 @@ func (r *AlertingRule) Eval(ctx context.Context, ts time.Time, query QueryFunc,
// State returns the maximum state of alert instances for this rule. // State returns the maximum state of alert instances for this rule.
// StateFiring > StatePending > StateInactive // StateFiring > StatePending > StateInactive
func (r *AlertingRule) State() AlertState { func (r *AlertingRule) State() AlertState {
r.mtx.Lock() r.activeMtx.Lock()
defer r.mtx.Unlock() defer r.activeMtx.Unlock()
maxState := StateInactive maxState := StateInactive
for _, a := range r.active { for _, a := range r.active {
@ -484,8 +470,8 @@ func (r *AlertingRule) ActiveAlerts() []*Alert {
// currentAlerts returns all instances of alerts for this rule. This may include // currentAlerts returns all instances of alerts for this rule. This may include
// inactive alerts that were previously firing. // inactive alerts that were previously firing.
func (r *AlertingRule) currentAlerts() []*Alert { func (r *AlertingRule) currentAlerts() []*Alert {
r.mtx.Lock() r.activeMtx.Lock()
defer r.mtx.Unlock() defer r.activeMtx.Unlock()
alerts := make([]*Alert, 0, len(r.active)) alerts := make([]*Alert, 0, len(r.active))
@ -501,8 +487,8 @@ func (r *AlertingRule) currentAlerts() []*Alert {
// and not on its copy. // and not on its copy.
// If you want to run on a copy of alerts then don't use this, get the alerts from 'ActiveAlerts()'. // If you want to run on a copy of alerts then don't use this, get the alerts from 'ActiveAlerts()'.
func (r *AlertingRule) ForEachActiveAlert(f func(*Alert)) { func (r *AlertingRule) ForEachActiveAlert(f func(*Alert)) {
r.mtx.Lock() r.activeMtx.Lock()
defer r.mtx.Unlock() defer r.activeMtx.Unlock()
for _, a := range r.active { for _, a := range r.active {
f(a) f(a)
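The rewrite above replaces one coarse mutex, which issue 10703 showed could block because template expansion inside Eval can call back into the rule's getters, with go.uber.org/atomic fields for scalar state plus a dedicated activeMtx for the active-alert map. A minimal sketch of the atomic-field pattern in isolation (type and field names are illustrative):

package main

import (
	"fmt"
	"time"

	"go.uber.org/atomic"
)

// ruleState keeps frequently-read scalar state lock-free; only a
// compound structure like the active-alert map still needs a mutex.
type ruleState struct {
	health              *atomic.String
	lastError           *atomic.Error
	evaluationDuration  *atomic.Duration
	evaluationTimestamp *atomic.Time
}

func newRuleState() *ruleState {
	return &ruleState{
		health:              atomic.NewString("unknown"),
		lastError:           atomic.NewError(nil),
		evaluationDuration:  atomic.NewDuration(0),
		evaluationTimestamp: atomic.NewTime(time.Time{}),
	}
}

func main() {
	s := newRuleState()
	s.health.Store("ok")
	s.evaluationDuration.Store(150 * time.Millisecond)
	// Reads never block, even while another goroutine is storing.
	fmt.Println(s.health.Load(), s.evaluationDuration.Load(), s.lastError.Load())
}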

View file

@ -416,6 +416,83 @@ func TestAlertingRuleEmptyLabelFromTemplate(t *testing.T) {
require.Equal(t, result, filteredRes) require.Equal(t, result, filteredRes)
} }
func TestAlertingRuleQueryInTemplate(t *testing.T) {
suite, err := promql.NewTest(t, `
load 1m
http_requests{job="app-server", instance="0"} 70 85 70 70
`)
require.NoError(t, err)
defer suite.Close()
require.NoError(t, suite.Run())
expr, err := parser.ParseExpr(`sum(http_requests) < 100`)
require.NoError(t, err)
ruleWithQueryInTemplate := NewAlertingRule(
"ruleWithQueryInTemplate",
expr,
time.Minute,
labels.FromStrings("label", "value"),
labels.FromStrings("templated_label", `{{- with "sort(sum(http_requests) by (instance))" | query -}}
{{- range $i,$v := . -}}
instance: {{ $v.Labels.instance }}, value: {{ printf "%.0f" $v.Value }};
{{- end -}}
{{- end -}}
`),
nil,
"",
true, log.NewNopLogger(),
)
evalTime := time.Unix(0, 0)
startQueryCh := make(chan struct{})
getDoneCh := make(chan struct{})
slowQueryFunc := func(ctx context.Context, q string, ts time.Time) (promql.Vector, error) {
if q == "sort(sum(http_requests) by (instance))" {
// This is a minimal reproduction of issue 10703: template expansion runs a query.
close(startQueryCh)
select {
case <-getDoneCh:
case <-time.After(time.Millisecond * 10):
// Assert that template expansion does not block.
require.Fail(t, "unexpected blocking while expanding template")
}
}
return EngineQueryFunc(suite.QueryEngine(), suite.Storage())(ctx, q, ts)
}
go func() {
<-startQueryCh
_ = ruleWithQueryInTemplate.Health()
_ = ruleWithQueryInTemplate.LastError()
_ = ruleWithQueryInTemplate.GetEvaluationDuration()
_ = ruleWithQueryInTemplate.GetEvaluationTimestamp()
close(getDoneCh)
}()
_, err = ruleWithQueryInTemplate.Eval(
suite.Context(), evalTime, slowQueryFunc, nil, 0,
)
require.NoError(t, err)
}
func BenchmarkAlertingRuleAtomicField(b *testing.B) {
b.ReportAllocs()
rule := NewAlertingRule("bench", nil, 0, nil, nil, nil, "", true, nil)
done := make(chan struct{})
go func() {
for i := 0; i < b.N; i++ {
rule.GetEvaluationTimestamp()
}
close(done)
}()
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
rule.SetEvaluationTimestamp(time.Now())
}
})
<-done
}
func TestAlertingRuleDuplicate(t *testing.T) { func TestAlertingRuleDuplicate(t *testing.T) {
storage := teststorage.New(t) storage := teststorage.New(t)
defer storage.Close() defer storage.Close()

View file

@ -26,6 +26,7 @@ import (
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/model" "github.com/prometheus/common/model"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
"go.uber.org/atomic"
"go.uber.org/goleak" "go.uber.org/goleak"
yaml "gopkg.in/yaml.v2" yaml "gopkg.in/yaml.v2"
@ -1291,17 +1292,20 @@ func TestUpdateMissedEvalMetrics(t *testing.T) {
m[1] = activeAlert m[1] = activeAlert
rule := &AlertingRule{ rule := &AlertingRule{
name: "HTTPRequestRateLow", name: "HTTPRequestRateLow",
vector: expr, vector: expr,
holdDuration: 5 * time.Minute, holdDuration: 5 * time.Minute,
labels: labels.FromStrings("severity", "critical"), labels: labels.FromStrings("severity", "critical"),
annotations: nil, annotations: nil,
externalLabels: nil, externalLabels: nil,
externalURL: "", externalURL: "",
health: HealthUnknown, active: m,
active: m, logger: nil,
logger: nil, restored: atomic.NewBool(true),
restored: true, health: atomic.NewString(string(HealthUnknown)),
evaluationTimestamp: atomic.NewTime(time.Time{}),
evaluationDuration: atomic.NewDuration(0),
lastError: atomic.NewError(nil),
} }
group := NewGroup(GroupOptions{ group := NewGroup(GroupOptions{

View file

@ -17,9 +17,9 @@ import (
"context" "context"
"fmt" "fmt"
"net/url" "net/url"
"sync"
"time" "time"
"go.uber.org/atomic"
yaml "gopkg.in/yaml.v2" yaml "gopkg.in/yaml.v2"
"github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/labels"
@ -33,25 +33,26 @@ type RecordingRule struct {
name string name string
vector parser.Expr vector parser.Expr
labels labels.Labels labels labels.Labels
// Protects the below.
mtx sync.Mutex
// The health of the recording rule. // The health of the recording rule.
health RuleHealth health *atomic.String
// Timestamp of last evaluation of the recording rule. // Timestamp of last evaluation of the recording rule.
evaluationTimestamp time.Time evaluationTimestamp *atomic.Time
// The last error seen by the recording rule. // The last error seen by the recording rule.
lastError error lastError *atomic.Error
// Duration of how long it took to evaluate the recording rule. // Duration of how long it took to evaluate the recording rule.
evaluationDuration time.Duration evaluationDuration *atomic.Duration
} }
// NewRecordingRule returns a new recording rule. // NewRecordingRule returns a new recording rule.
func NewRecordingRule(name string, vector parser.Expr, lset labels.Labels) *RecordingRule { func NewRecordingRule(name string, vector parser.Expr, lset labels.Labels) *RecordingRule {
return &RecordingRule{ return &RecordingRule{
name: name, name: name,
vector: vector, vector: vector,
health: HealthUnknown, labels: lset,
labels: lset, health: atomic.NewString(string(HealthUnknown)),
evaluationTimestamp: atomic.NewTime(time.Time{}),
evaluationDuration: atomic.NewDuration(0),
lastError: atomic.NewError(nil),
} }
} }
@ -124,56 +125,40 @@ func (rule *RecordingRule) String() string {
// SetEvaluationDuration updates evaluationDuration to the time in seconds it took to evaluate the rule on its last evaluation. // SetEvaluationDuration updates evaluationDuration to the time in seconds it took to evaluate the rule on its last evaluation.
func (rule *RecordingRule) SetEvaluationDuration(dur time.Duration) { func (rule *RecordingRule) SetEvaluationDuration(dur time.Duration) {
rule.mtx.Lock() rule.evaluationDuration.Store(dur)
defer rule.mtx.Unlock()
rule.evaluationDuration = dur
} }
// SetLastError sets the current error seen by the recording rule. // SetLastError sets the current error seen by the recording rule.
func (rule *RecordingRule) SetLastError(err error) { func (rule *RecordingRule) SetLastError(err error) {
rule.mtx.Lock() rule.lastError.Store(err)
defer rule.mtx.Unlock()
rule.lastError = err
} }
// LastError returns the last error seen by the recording rule. // LastError returns the last error seen by the recording rule.
func (rule *RecordingRule) LastError() error { func (rule *RecordingRule) LastError() error {
rule.mtx.Lock() return rule.lastError.Load()
defer rule.mtx.Unlock()
return rule.lastError
} }
// SetHealth sets the current health of the recording rule. // SetHealth sets the current health of the recording rule.
func (rule *RecordingRule) SetHealth(health RuleHealth) { func (rule *RecordingRule) SetHealth(health RuleHealth) {
rule.mtx.Lock() rule.health.Store(string(health))
defer rule.mtx.Unlock()
rule.health = health
} }
// Health returns the current health of the recording rule. // Health returns the current health of the recording rule.
func (rule *RecordingRule) Health() RuleHealth { func (rule *RecordingRule) Health() RuleHealth {
rule.mtx.Lock() return RuleHealth(rule.health.Load())
defer rule.mtx.Unlock()
return rule.health
} }
// GetEvaluationDuration returns the duration it took to evaluate the recording rule. // GetEvaluationDuration returns the duration it took to evaluate the recording rule.
func (rule *RecordingRule) GetEvaluationDuration() time.Duration { func (rule *RecordingRule) GetEvaluationDuration() time.Duration {
rule.mtx.Lock() return rule.evaluationDuration.Load()
defer rule.mtx.Unlock()
return rule.evaluationDuration
} }
// SetEvaluationTimestamp updates evaluationTimestamp to the timestamp of when the rule was last evaluated. // SetEvaluationTimestamp updates evaluationTimestamp to the timestamp of when the rule was last evaluated.
func (rule *RecordingRule) SetEvaluationTimestamp(ts time.Time) { func (rule *RecordingRule) SetEvaluationTimestamp(ts time.Time) {
rule.mtx.Lock() rule.evaluationTimestamp.Store(ts)
defer rule.mtx.Unlock()
rule.evaluationTimestamp = ts
} }
// GetEvaluationTimestamp returns the time the evaluation took place. // GetEvaluationTimestamp returns the time the evaluation took place.
func (rule *RecordingRule) GetEvaluationTimestamp() time.Time { func (rule *RecordingRule) GetEvaluationTimestamp() time.Time {
rule.mtx.Lock() return rule.evaluationTimestamp.Load()
defer rule.mtx.Unlock()
return rule.evaluationTimestamp
} }
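The refactor above swaps `RecordingRule`'s mutex for per-field typed atomics from `go.uber.org/atomic`. This is safe because each status field is read and written independently, with no invariant spanning fields; the trade-off is that a reader may observe, say, a health value from one evaluation next to a timestamp from another. A minimal sketch of the pattern (the `ruleState` type is illustrative, not part of this change):

```
package main

import (
	"fmt"
	"time"

	"go.uber.org/atomic"
)

// ruleState mirrors the pattern adopted above: one typed atomic per
// independently updated field, so getters never block setters.
type ruleState struct {
	health              *atomic.String
	lastError           *atomic.Error
	evaluationTimestamp *atomic.Time
	evaluationDuration  *atomic.Duration
}

func newRuleState() *ruleState {
	return &ruleState{
		health:              atomic.NewString("unknown"),
		lastError:           atomic.NewError(nil),
		evaluationTimestamp: atomic.NewTime(time.Time{}),
		evaluationDuration:  atomic.NewDuration(0),
	}
}

func main() {
	s := newRuleState()
	s.evaluationDuration.Store(150 * time.Millisecond)
	fmt.Println(s.health.Load(), s.evaluationDuration.Load())
}
```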

View file

@@ -22,6 +22,7 @@ import (
"github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/model/exemplar"
"github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/metadata"
"github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/storage"
) )
@@ -44,6 +45,11 @@ func (a nopAppender) AppendExemplar(storage.SeriesRef, labels.Labels, exemplar.E
func (a nopAppender) AppendHistogram(storage.SeriesRef, labels.Labels, int64, *histogram.Histogram) (storage.SeriesRef, error) { func (a nopAppender) AppendHistogram(storage.SeriesRef, labels.Labels, int64, *histogram.Histogram) (storage.SeriesRef, error) {
return 0, nil return 0, nil
} }
func (a nopAppender) UpdateMetadata(storage.SeriesRef, labels.Labels, metadata.Metadata) (storage.SeriesRef, error) {
return 0, nil
}
func (a nopAppender) Commit() error { return nil } func (a nopAppender) Commit() error { return nil }
func (a nopAppender) Rollback() error { return nil } func (a nopAppender) Rollback() error { return nil }
@@ -70,6 +76,8 @@ type collectResultAppender struct {
resultHistograms []histogramSample resultHistograms []histogramSample
pendingHistograms []histogramSample pendingHistograms []histogramSample
rolledbackHistograms []histogramSample rolledbackHistograms []histogramSample
pendingMetadata []metadata.Metadata
resultMetadata []metadata.Metadata
} }
func (a *collectResultAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64, v float64) (storage.SeriesRef, error) { func (a *collectResultAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64, v float64) (storage.SeriesRef, error) {
@@ -111,13 +119,27 @@ func (a *collectResultAppender) AppendHistogram(ref storage.SeriesRef, l labels.
return a.next.AppendHistogram(ref, l, t, h) return a.next.AppendHistogram(ref, l, t, h)
} }
func (a *collectResultAppender) UpdateMetadata(ref storage.SeriesRef, l labels.Labels, m metadata.Metadata) (storage.SeriesRef, error) {
a.pendingMetadata = append(a.pendingMetadata, m)
if ref == 0 {
ref = storage.SeriesRef(rand.Uint64())
}
if a.next == nil {
return ref, nil
}
return a.next.UpdateMetadata(ref, l, m)
}
func (a *collectResultAppender) Commit() error { func (a *collectResultAppender) Commit() error {
a.result = append(a.result, a.pendingResult...) a.result = append(a.result, a.pendingResult...)
a.resultExemplars = append(a.resultExemplars, a.pendingExemplars...) a.resultExemplars = append(a.resultExemplars, a.pendingExemplars...)
a.resultHistograms = append(a.resultHistograms, a.pendingHistograms...) a.resultHistograms = append(a.resultHistograms, a.pendingHistograms...)
a.resultMetadata = append(a.resultMetadata, a.pendingMetadata...)
a.pendingResult = nil a.pendingResult = nil
a.pendingExemplars = nil a.pendingExemplars = nil
a.pendingHistograms = nil a.pendingHistograms = nil
a.pendingMetadata = nil
if a.next == nil { if a.next == nil {
return nil return nil
} }

View file

@@ -124,7 +124,8 @@ func NewManager(o *Options, logger log.Logger, app storage.Appendable) *Manager
// Options are the configuration parameters to the scrape manager. // Options are the configuration parameters to the scrape manager.
type Options struct { type Options struct {
ExtraMetrics bool ExtraMetrics bool
NoDefaultPort bool
// Option used by downstream scraper users like OpenTelemetry Collector // Option used by downstream scraper users like OpenTelemetry Collector
// to help lookup metric metadata. Should be false for Prometheus. // to help lookup metric metadata. Should be false for Prometheus.
PassMetadataInContext bool PassMetadataInContext bool
@@ -207,7 +208,7 @@ func (m *Manager) reload() {
level.Error(m.logger).Log("msg", "error reloading target set", "err", "invalid config id:"+setName) level.Error(m.logger).Log("msg", "error reloading target set", "err", "invalid config id:"+setName)
continue continue
} }
sp, err := newScrapePool(scrapeConfig, m.append, m.jitterSeed, log.With(m.logger, "scrape_pool", setName), m.opts.ExtraMetrics, m.opts.PassMetadataInContext, m.opts.HTTPClientOptions) sp, err := newScrapePool(scrapeConfig, m.append, m.jitterSeed, log.With(m.logger, "scrape_pool", setName), m.opts)
if err != nil { if err != nil {
level.Error(m.logger).Log("msg", "error creating new scrape pool", "err", err, "scrape_pool", setName) level.Error(m.logger).Log("msg", "error creating new scrape pool", "err", err, "scrape_pool", setName)
continue continue

View file

@@ -31,11 +31,12 @@ import (
func TestPopulateLabels(t *testing.T) { func TestPopulateLabels(t *testing.T) {
cases := []struct { cases := []struct {
in labels.Labels in labels.Labels
cfg *config.ScrapeConfig cfg *config.ScrapeConfig
res labels.Labels noDefaultPort bool
resOrig labels.Labels res labels.Labels
err string resOrig labels.Labels
err string
}{ }{
// Regular population of scrape config options. // Regular population of scrape config options.
{ {
@@ -331,11 +332,104 @@ func TestPopulateLabels(t *testing.T) {
resOrig: nil, resOrig: nil,
err: "scrape timeout cannot be greater than scrape interval (\"2s\" > \"1s\")", err: "scrape timeout cannot be greater than scrape interval (\"2s\" > \"1s\")",
}, },
// Don't attach default port.
{
in: labels.FromMap(map[string]string{
model.AddressLabel: "1.2.3.4",
}),
cfg: &config.ScrapeConfig{
Scheme: "https",
MetricsPath: "/metrics",
JobName: "job",
ScrapeInterval: model.Duration(time.Second),
ScrapeTimeout: model.Duration(time.Second),
},
noDefaultPort: true,
res: labels.FromMap(map[string]string{
model.AddressLabel: "1.2.3.4",
model.InstanceLabel: "1.2.3.4",
model.SchemeLabel: "https",
model.MetricsPathLabel: "/metrics",
model.JobLabel: "job",
model.ScrapeIntervalLabel: "1s",
model.ScrapeTimeoutLabel: "1s",
}),
resOrig: labels.FromMap(map[string]string{
model.AddressLabel: "1.2.3.4",
model.SchemeLabel: "https",
model.MetricsPathLabel: "/metrics",
model.JobLabel: "job",
model.ScrapeIntervalLabel: "1s",
model.ScrapeTimeoutLabel: "1s",
}),
},
// Remove default port (http).
{
in: labels.FromMap(map[string]string{
model.AddressLabel: "1.2.3.4:80",
}),
cfg: &config.ScrapeConfig{
Scheme: "http",
MetricsPath: "/metrics",
JobName: "job",
ScrapeInterval: model.Duration(time.Second),
ScrapeTimeout: model.Duration(time.Second),
},
noDefaultPort: true,
res: labels.FromMap(map[string]string{
model.AddressLabel: "1.2.3.4",
model.InstanceLabel: "1.2.3.4:80",
model.SchemeLabel: "http",
model.MetricsPathLabel: "/metrics",
model.JobLabel: "job",
model.ScrapeIntervalLabel: "1s",
model.ScrapeTimeoutLabel: "1s",
}),
resOrig: labels.FromMap(map[string]string{
model.AddressLabel: "1.2.3.4:80",
model.SchemeLabel: "http",
model.MetricsPathLabel: "/metrics",
model.JobLabel: "job",
model.ScrapeIntervalLabel: "1s",
model.ScrapeTimeoutLabel: "1s",
}),
},
// Remove default port (https).
{
in: labels.FromMap(map[string]string{
model.AddressLabel: "1.2.3.4:443",
}),
cfg: &config.ScrapeConfig{
Scheme: "https",
MetricsPath: "/metrics",
JobName: "job",
ScrapeInterval: model.Duration(time.Second),
ScrapeTimeout: model.Duration(time.Second),
},
noDefaultPort: true,
res: labels.FromMap(map[string]string{
model.AddressLabel: "1.2.3.4",
model.InstanceLabel: "1.2.3.4:443",
model.SchemeLabel: "https",
model.MetricsPathLabel: "/metrics",
model.JobLabel: "job",
model.ScrapeIntervalLabel: "1s",
model.ScrapeTimeoutLabel: "1s",
}),
resOrig: labels.FromMap(map[string]string{
model.AddressLabel: "1.2.3.4:443",
model.SchemeLabel: "https",
model.MetricsPathLabel: "/metrics",
model.JobLabel: "job",
model.ScrapeIntervalLabel: "1s",
model.ScrapeTimeoutLabel: "1s",
}),
},
} }
for _, c := range cases { for _, c := range cases {
in := c.in.Copy() in := c.in.Copy()
res, orig, err := PopulateLabels(c.in, c.cfg) res, orig, err := PopulateLabels(c.in, c.cfg, c.noDefaultPort)
if c.err != "" { if c.err != "" {
require.EqualError(t, err, c.err) require.EqualError(t, err, c.err)
} else { } else {

View file

@@ -240,6 +240,8 @@ type scrapePool struct {
// Constructor for new scrape loops. This is settable for testing convenience. // Constructor for new scrape loops. This is settable for testing convenience.
newLoop func(scrapeLoopOptions) loop newLoop func(scrapeLoopOptions) loop
noDefaultPort bool
} }
type labelLimits struct { type labelLimits struct {
@@ -265,13 +267,13 @@ const maxAheadTime = 10 * time.Minute
type labelsMutator func(labels.Labels) labels.Labels type labelsMutator func(labels.Labels) labels.Labels
func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, jitterSeed uint64, logger log.Logger, reportExtraMetrics, passMetadataInContext bool, httpOpts []config_util.HTTPClientOption) (*scrapePool, error) { func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, jitterSeed uint64, logger log.Logger, options *Options) (*scrapePool, error) {
targetScrapePools.Inc() targetScrapePools.Inc()
if logger == nil { if logger == nil {
logger = log.NewNopLogger() logger = log.NewNopLogger()
} }
client, err := config_util.NewClientFromConfig(cfg.HTTPClientConfig, cfg.JobName, httpOpts...) client, err := config_util.NewClientFromConfig(cfg.HTTPClientConfig, cfg.JobName, options.HTTPClientOptions...)
if err != nil { if err != nil {
targetScrapePoolsFailed.Inc() targetScrapePoolsFailed.Inc()
return nil, errors.Wrap(err, "error creating HTTP client") return nil, errors.Wrap(err, "error creating HTTP client")
@@ -288,7 +290,8 @@ func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, jitterSeed
activeTargets: map[uint64]*Target{}, activeTargets: map[uint64]*Target{},
loops: map[uint64]loop{}, loops: map[uint64]loop{},
logger: logger, logger: logger,
httpOpts: httpOpts, httpOpts: options.HTTPClientOptions,
noDefaultPort: options.NoDefaultPort,
} }
sp.newLoop = func(opts scrapeLoopOptions) loop { sp.newLoop = func(opts scrapeLoopOptions) loop {
// Update the targets retrieval function for metadata to a new scrape cache. // Update the targets retrieval function for metadata to a new scrape cache.
@@ -315,10 +318,10 @@ func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, jitterSeed
opts.labelLimits, opts.labelLimits,
opts.interval, opts.interval,
opts.timeout, opts.timeout,
reportExtraMetrics, options.ExtraMetrics,
opts.target, opts.target,
cache, cache,
passMetadataInContext, options.PassMetadataInContext,
) )
} }
@@ -481,7 +484,7 @@ func (sp *scrapePool) Sync(tgs []*targetgroup.Group) {
var all []*Target var all []*Target
sp.droppedTargets = []*Target{} sp.droppedTargets = []*Target{}
for _, tg := range tgs { for _, tg := range tgs {
targets, failures := TargetsFromGroup(tg, sp.config) targets, failures := TargetsFromGroup(tg, sp.config, sp.noDefaultPort)
for _, err := range failures { for _, err := range failures {
level.Error(sp.logger).Log("msg", "Creating target failed", "err", err) level.Error(sp.logger).Log("msg", "Creating target failed", "err", err)
} }

View file

@@ -57,7 +57,7 @@ func TestNewScrapePool(t *testing.T) {
var ( var (
app = &nopAppendable{} app = &nopAppendable{}
cfg = &config.ScrapeConfig{} cfg = &config.ScrapeConfig{}
sp, _ = newScrapePool(cfg, app, 0, nil, false, false, nil) sp, _ = newScrapePool(cfg, app, 0, nil, &Options{})
) )
if a, ok := sp.appendable.(*nopAppendable); !ok || a != app { if a, ok := sp.appendable.(*nopAppendable); !ok || a != app {
@@ -92,7 +92,7 @@ func TestDroppedTargetsList(t *testing.T) {
}, },
}, },
} }
sp, _ = newScrapePool(cfg, app, 0, nil, false, false, nil) sp, _ = newScrapePool(cfg, app, 0, nil, &Options{})
expectedLabelSetString = "{__address__=\"127.0.0.1:9090\", __scrape_interval__=\"0s\", __scrape_timeout__=\"0s\", job=\"dropMe\"}" expectedLabelSetString = "{__address__=\"127.0.0.1:9090\", __scrape_interval__=\"0s\", __scrape_timeout__=\"0s\", job=\"dropMe\"}"
expectedLength = 1 expectedLength = 1
) )
@@ -489,7 +489,7 @@ func TestScrapePoolTargetLimit(t *testing.T) {
func TestScrapePoolAppender(t *testing.T) { func TestScrapePoolAppender(t *testing.T) {
cfg := &config.ScrapeConfig{} cfg := &config.ScrapeConfig{}
app := &nopAppendable{} app := &nopAppendable{}
sp, _ := newScrapePool(cfg, app, 0, nil, false, false, nil) sp, _ := newScrapePool(cfg, app, 0, nil, &Options{})
loop := sp.newLoop(scrapeLoopOptions{ loop := sp.newLoop(scrapeLoopOptions{
target: &Target{}, target: &Target{},
@@ -531,7 +531,7 @@ func TestScrapePoolRaces(t *testing.T) {
newConfig := func() *config.ScrapeConfig { newConfig := func() *config.ScrapeConfig {
return &config.ScrapeConfig{ScrapeInterval: interval, ScrapeTimeout: timeout} return &config.ScrapeConfig{ScrapeInterval: interval, ScrapeTimeout: timeout}
} }
sp, _ := newScrapePool(newConfig(), &nopAppendable{}, 0, nil, false, false, nil) sp, _ := newScrapePool(newConfig(), &nopAppendable{}, 0, nil, &Options{})
tgts := []*targetgroup.Group{ tgts := []*targetgroup.Group{
{ {
Targets: []model.LabelSet{ Targets: []model.LabelSet{
@@ -2627,7 +2627,7 @@ func TestReuseScrapeCache(t *testing.T) {
ScrapeInterval: model.Duration(5 * time.Second), ScrapeInterval: model.Duration(5 * time.Second),
MetricsPath: "/metrics", MetricsPath: "/metrics",
} }
sp, _ = newScrapePool(cfg, app, 0, nil, false, false, nil) sp, _ = newScrapePool(cfg, app, 0, nil, &Options{})
t1 = &Target{ t1 = &Target{
discoveredLabels: labels.Labels{ discoveredLabels: labels.Labels{
labels.Label{ labels.Label{
@@ -2840,7 +2840,7 @@ func TestReuseCacheRace(t *testing.T) {
ScrapeInterval: model.Duration(5 * time.Second), ScrapeInterval: model.Duration(5 * time.Second),
MetricsPath: "/metrics", MetricsPath: "/metrics",
} }
sp, _ = newScrapePool(cfg, app, 0, nil, false, false, nil) sp, _ = newScrapePool(cfg, app, 0, nil, &Options{})
t1 = &Target{ t1 = &Target{
discoveredLabels: labels.Labels{ discoveredLabels: labels.Labels{
labels.Label{ labels.Label{
@@ -2972,7 +2972,7 @@ func TestScrapeReportLimit(t *testing.T) {
})) }))
defer ts.Close() defer ts.Close()
sp, err := newScrapePool(cfg, s, 0, nil, false, false, nil) sp, err := newScrapePool(cfg, s, 0, nil, &Options{})
require.NoError(t, err) require.NoError(t, err)
defer sp.stop() defer sp.stop()
@@ -3142,7 +3142,7 @@ func TestTargetScrapeIntervalAndTimeoutRelabel(t *testing.T) {
}, },
}, },
} }
sp, _ := newScrapePool(config, &nopAppendable{}, 0, nil, false, false, nil) sp, _ := newScrapePool(config, &nopAppendable{}, 0, nil, &Options{})
tgts := []*targetgroup.Group{ tgts := []*targetgroup.Group{
{ {
Targets: []model.LabelSet{{model.AddressLabel: "127.0.0.1:9090"}}, Targets: []model.LabelSet{{model.AddressLabel: "127.0.0.1:9090"}},

View file

@@ -351,7 +351,7 @@ func (app *timeLimitAppender) Append(ref storage.SeriesRef, lset labels.Labels,
// PopulateLabels builds a label set from the given label set and scrape configuration. // PopulateLabels builds a label set from the given label set and scrape configuration.
// It returns a label set before relabeling was applied as the second return value. // It returns a label set before relabeling was applied as the second return value.
// If the target is dropped during relabeling, the original discovered label set is still returned. // If the target is dropped during relabeling, the original discovered label set is still returned.
func PopulateLabels(lset labels.Labels, cfg *config.ScrapeConfig) (res, orig labels.Labels, err error) { func PopulateLabels(lset labels.Labels, cfg *config.ScrapeConfig, noDefaultPort bool) (res, orig labels.Labels, err error) {
// Copy labels into the labelset for the target if they are not set already. // Copy labels into the labelset for the target if they are not set already.
scrapeLabels := []labels.Label{ scrapeLabels := []labels.Label{
{Name: model.JobLabel, Value: cfg.JobName}, {Name: model.JobLabel, Value: cfg.JobName},
@@ -389,21 +389,25 @@ func PopulateLabels(lset labels.Labels, cfg *config.ScrapeConfig) (res, orig lab
// addPort checks whether we should add a default port to the address. // addPort checks whether we should add a default port to the address.
// If the address is not valid, we don't append a port either. // If the address is not valid, we don't append a port either.
addPort := func(s string) bool { addPort := func(s string) (string, string, bool) {
// If we can split, a port exists and we don't have to add one. // If we can split, a port exists and we don't have to add one.
if _, _, err := net.SplitHostPort(s); err == nil { if host, port, err := net.SplitHostPort(s); err == nil {
return false return host, port, false
} }
// If adding a port makes it valid, the previous error // If adding a port makes it valid, the previous error
// was not due to an invalid address and we can append a port. // was not due to an invalid address and we can append a port.
_, _, err := net.SplitHostPort(s + ":1234") _, _, err := net.SplitHostPort(s + ":1234")
return err == nil return "", "", err == nil
} }
addr := lset.Get(model.AddressLabel) addr := lset.Get(model.AddressLabel)
// If it's an address with no trailing port, infer it based on the used scheme. scheme := lset.Get(model.SchemeLabel)
if addPort(addr) { host, port, add := addPort(addr)
// If it's an address with no trailing port, infer it based on the used scheme
// unless the no-default-scrape-port feature flag is present.
if !noDefaultPort && add {
// Addresses reaching this point are already wrapped in [] if necessary. // Addresses reaching this point are already wrapped in [] if necessary.
switch lset.Get(model.SchemeLabel) { switch scheme {
case "http", "": case "http", "":
addr = addr + ":80" addr = addr + ":80"
case "https": case "https":
@@ -414,6 +418,21 @@ func PopulateLabels(lset labels.Labels, cfg *config.ScrapeConfig) (res, orig lab
lb.Set(model.AddressLabel, addr) lb.Set(model.AddressLabel, addr)
} }
if noDefaultPort {
// If it's an address with a trailing default port and the
// no-default-scrape-port flag is present, remove the port.
switch port {
case "80":
if scheme == "http" {
lb.Set(model.AddressLabel, host)
}
case "443":
if scheme == "https" {
lb.Set(model.AddressLabel, host)
}
}
}
if err := config.CheckTargetAddress(model.LabelValue(addr)); err != nil { if err := config.CheckTargetAddress(model.LabelValue(addr)); err != nil {
return nil, nil, err return nil, nil, err
} }
@@ -464,7 +483,7 @@ func PopulateLabels(lset labels.Labels, cfg *config.ScrapeConfig) (res, orig lab
} }
// TargetsFromGroup builds targets based on the given TargetGroup and config. // TargetsFromGroup builds targets based on the given TargetGroup and config.
func TargetsFromGroup(tg *targetgroup.Group, cfg *config.ScrapeConfig) ([]*Target, []error) { func TargetsFromGroup(tg *targetgroup.Group, cfg *config.ScrapeConfig, noDefaultPort bool) ([]*Target, []error) {
targets := make([]*Target, 0, len(tg.Targets)) targets := make([]*Target, 0, len(tg.Targets))
failures := []error{} failures := []error{}
@@ -482,7 +501,7 @@ func TargetsFromGroup(tg *targetgroup.Group, cfg *config.ScrapeConfig) ([]*Targe
lset := labels.New(lbls...) lset := labels.New(lbls...)
lbls, origLabels, err := PopulateLabels(lset, cfg) lbls, origLabels, err := PopulateLabels(lset, cfg, noDefaultPort)
if err != nil { if err != nil {
failures = append(failures, errors.Wrapf(err, "instance %d in group %s", i, tg)) failures = append(failures, errors.Wrapf(err, "instance %d in group %s", i, tg))
} }

View file

@@ -375,7 +375,7 @@ func TestTargetsFromGroup(t *testing.T) {
ScrapeTimeout: model.Duration(10 * time.Second), ScrapeTimeout: model.Duration(10 * time.Second),
ScrapeInterval: model.Duration(1 * time.Minute), ScrapeInterval: model.Duration(1 * time.Minute),
} }
targets, failures := TargetsFromGroup(&targetgroup.Group{Targets: []model.LabelSet{{}, {model.AddressLabel: "localhost:9090"}}}, &cfg) targets, failures := TargetsFromGroup(&targetgroup.Group{Targets: []model.LabelSet{{}, {model.AddressLabel: "localhost:9090"}}}, &cfg, false)
if len(targets) != 1 { if len(targets) != 1 {
t.Fatalf("Expected 1 target, got %v", len(targets)) t.Fatalf("Expected 1 target, got %v", len(targets))
} }

View file

@@ -23,6 +23,7 @@ import (
"github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/model/exemplar"
"github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/metadata"
tsdb_errors "github.com/prometheus/prometheus/tsdb/errors" tsdb_errors "github.com/prometheus/prometheus/tsdb/errors"
) )
@@ -187,6 +188,20 @@ func (f *fanoutAppender) AppendHistogram(ref SeriesRef, l labels.Labels, t int64
return ref, nil return ref, nil
} }
func (f *fanoutAppender) UpdateMetadata(ref SeriesRef, l labels.Labels, m metadata.Metadata) (SeriesRef, error) {
ref, err := f.primary.UpdateMetadata(ref, l, m)
if err != nil {
return ref, err
}
for _, appender := range f.secondaries {
if _, err := appender.UpdateMetadata(ref, l, m); err != nil {
return 0, err
}
}
return ref, nil
}
func (f *fanoutAppender) Commit() (err error) { func (f *fanoutAppender) Commit() (err error) {
err = f.primary.Commit() err = f.primary.Commit()

View file

@@ -21,6 +21,7 @@ import (
"github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/model/exemplar"
"github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/metadata"
"github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/tsdb/chunkenc"
"github.com/prometheus/prometheus/tsdb/chunks" "github.com/prometheus/prometheus/tsdb/chunks"
) )
@@ -229,8 +230,10 @@ type Appender interface {
// Rollback rolls back all modifications made in the appender so far. // Rollback rolls back all modifications made in the appender so far.
// Appender has to be discarded after rollback. // Appender has to be discarded after rollback.
Rollback() error Rollback() error
ExemplarAppender ExemplarAppender
HistogramAppender HistogramAppender
MetadataUpdater
} }
// GetRef is an extra interface on Appenders used by downstream projects // GetRef is an extra interface on Appenders used by downstream projects
@@ -275,6 +278,18 @@ type HistogramAppender interface {
AppendHistogram(ref SeriesRef, l labels.Labels, t int64, h *histogram.Histogram) (SeriesRef, error) AppendHistogram(ref SeriesRef, l labels.Labels, t int64, h *histogram.Histogram) (SeriesRef, error)
} }
// MetadataUpdater provides an interface for associating metadata to stored series.
type MetadataUpdater interface {
// UpdateMetadata updates a metadata entry for the given series and labels.
// A series reference number is returned which can be used to modify the
// metadata of the given series in the same or later transactions.
// Returned reference numbers are ephemeral and may be rejected in calls
// to UpdateMetadata() at any point. If the series does not exist,
// UpdateMetadata returns an error.
// If the reference is 0 it must not be used for caching.
UpdateMetadata(ref SeriesRef, l labels.Labels, m metadata.Metadata) (SeriesRef, error)
}
// SeriesSet contains a set of series. // SeriesSet contains a set of series.
type SeriesSet interface { type SeriesSet interface {
Next() bool Next() bool
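With `MetadataUpdater` embedded in `Appender`, metadata travels through the same transactional path as samples. A minimal sketch of the calling pattern against any `storage.Appendable` (the `attachMetadata` helper and its label values are illustrative, not part of this change):

```
package main

import (
	"context"

	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/model/metadata"
	"github.com/prometheus/prometheus/storage"
)

// attachMetadata appends a sample so the series exists, then records
// metadata for it in the same transaction.
func attachMetadata(ctx context.Context, db storage.Appendable) error {
	app := db.Appender(ctx)
	lset := labels.FromStrings("__name__", "http_requests_total", "job", "api")

	// The series must already exist; UpdateMetadata does not create it.
	if _, err := app.Append(0, lset, 0, 1); err != nil {
		return err
	}

	// A zero ref asks the storage to resolve the series by labels.
	// Per the contract above, a zero return value must not be cached.
	m := metadata.Metadata{Type: "counter", Unit: "", Help: "Total HTTP requests."}
	if _, err := app.UpdateMetadata(0, lset, m); err != nil {
		return err
	}
	return app.Commit()
}
```

The TSDB tests later in this commit exercise the same flow against a real head.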

View file

@@ -28,6 +28,7 @@ import (
"github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/model/exemplar"
"github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/metadata"
"github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/tsdb/wal" "github.com/prometheus/prometheus/tsdb/wal"
) )
@@ -285,6 +286,12 @@ func (t *timestampTracker) AppendHistogram(_ storage.SeriesRef, _ labels.Labels,
return 0, nil return 0, nil
} }
func (t *timestampTracker) UpdateMetadata(_ storage.SeriesRef, _ labels.Labels, _ metadata.Metadata) (storage.SeriesRef, error) {
// TODO: Add and increment a `metadata` field when we get around to wiring metadata in remote_write.
// UpdateMetadata is a no-op for remote write (where timestampTracker is being used) for now.
return 0, nil
}
// Commit implements storage.Appender. // Commit implements storage.Appender.
func (t *timestampTracker) Commit() error { func (t *timestampTracker) Commit() error {
t.writeStorage.samplesIn.incr(t.samples + t.exemplars + t.histograms) t.writeStorage.samplesIn.incr(t.samples + t.exemplars + t.histograms)

View file

@@ -28,6 +28,7 @@ import (
"github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/model/exemplar"
"github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/metadata"
"github.com/prometheus/prometheus/prompb" "github.com/prometheus/prometheus/prompb"
"github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/storage"
) )
@@ -234,3 +235,9 @@ func (m *mockAppendable) AppendHistogram(ref storage.SeriesRef, l labels.Labels,
m.histograms = append(m.histograms, mockHistogram{l, t, h}) m.histograms = append(m.histograms, mockHistogram{l, t, h})
return 0, nil return 0, nil
} }
func (m *mockAppendable) UpdateMetadata(_ storage.SeriesRef, _ labels.Labels, _ metadata.Metadata) (storage.SeriesRef, error) {
// TODO: Wire metadata in a mockAppendable field when we get around to handling metadata in remote_write.
// UpdateMetadata is a no-op for remote write (where mockAppendable is used for testing) for now.
return 0, nil
}

View file

@@ -45,6 +45,8 @@ var (
Name: "prometheus_template_text_expansions_total", Name: "prometheus_template_text_expansions_total",
Help: "The total number of template text expansions.", Help: "The total number of template text expansions.",
}) })
errNaNOrInf = errors.New("value is NaN or Inf")
) )
func init() { func init() {
@@ -315,15 +317,24 @@ func NewTemplateExpander(
if err != nil { if err != nil {
return "", err return "", err
} }
if math.IsNaN(v) || math.IsInf(v, 0) {
tm, err := floatToTime(v)
switch {
case errors.Is(err, errNaNOrInf):
return fmt.Sprintf("%.4g", v), nil return fmt.Sprintf("%.4g", v), nil
case err != nil:
return "", err
} }
timestamp := v * 1e9
if timestamp > math.MaxInt64 || timestamp < math.MinInt64 { return fmt.Sprint(tm), nil
return "", fmt.Errorf("%v cannot be represented as a nanoseconds timestamp since it overflows int64", v) },
"toTime": func(i interface{}) (*time.Time, error) {
v, err := convertToFloat(i)
if err != nil {
return nil, err
} }
t := model.TimeFromUnixNano(int64(timestamp)).Time().UTC()
return fmt.Sprint(t), nil return floatToTime(v)
}, },
"pathPrefix": func() string { "pathPrefix": func() string {
return externalURL.Path return externalURL.Path
@@ -446,3 +457,15 @@ func (te Expander) ParseTest() error {
} }
return nil return nil
} }
func floatToTime(v float64) (*time.Time, error) {
if math.IsNaN(v) || math.IsInf(v, 0) {
return nil, errNaNOrInf
}
timestamp := v * 1e9
if timestamp > math.MaxInt64 || timestamp < math.MinInt64 {
return nil, fmt.Errorf("%v cannot be represented as a nanoseconds timestamp since it overflows int64", v)
}
t := model.TimeFromUnixNano(int64(timestamp)).Time().UTC()
return &t, nil
}

View file

@@ -17,6 +17,7 @@ import (
"context" "context"
"math" "math"
"net/url" "net/url"
"reflect"
"testing" "testing"
"time" "time"
@@ -429,6 +430,16 @@ func TestTemplateExpansion(t *testing.T) {
text: `{{ "1435065584.128" | humanizeTimestamp }}`, text: `{{ "1435065584.128" | humanizeTimestamp }}`,
output: "2015-06-23 13:19:44.128 +0000 UTC", output: "2015-06-23 13:19:44.128 +0000 UTC",
}, },
{
// ToTime - model.SampleValue input - float64.
text: `{{ (1435065584.128 | toTime).Format "2006" }}`,
output: "2015",
},
{
// ToTime - model.SampleValue input - string.
text: `{{ ("1435065584.128" | toTime).Format "2006" }}`,
output: "2015",
},
{ {
// Title. // Title.
text: "{{ \"aa bb CC\" | title }}", text: "{{ \"aa bb CC\" | title }}",
@@ -560,3 +571,55 @@ func testTemplateExpansion(t *testing.T, scenarios []scenario) {
} }
} }
} }
func Test_floatToTime(t *testing.T) {
type args struct {
v float64
}
tests := []struct {
name string
args args
want *time.Time
wantErr bool
}{
{
"happy path",
args{
v: 1657155181,
},
func() *time.Time {
tm := time.Date(2022, 7, 7, 0, 53, 1, 0, time.UTC)
return &tm
}(),
false,
},
{
"more than math.MaxInt64",
args{
v: 1.79769313486231570814527423731704356798070e+300,
},
nil,
true,
},
{
"less than math.MinInt64",
args{
v: -1.79769313486231570814527423731704356798070e+300,
},
nil,
true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, err := floatToTime(tt.args.v)
if (err != nil) != tt.wantErr {
t.Errorf("floatToTime() error = %v, wantErr %v", err, tt.wantErr)
return
}
if !reflect.DeepEqual(got, tt.want) {
t.Errorf("floatToTime() got = %v, want %v", got, tt.want)
}
})
}
}

View file

@@ -30,7 +30,7 @@ import (
"go.opentelemetry.io/otel/propagation" "go.opentelemetry.io/otel/propagation"
"go.opentelemetry.io/otel/sdk/resource" "go.opentelemetry.io/otel/sdk/resource"
tracesdk "go.opentelemetry.io/otel/sdk/trace" tracesdk "go.opentelemetry.io/otel/sdk/trace"
semconv "go.opentelemetry.io/otel/semconv/v1.10.0" semconv "go.opentelemetry.io/otel/semconv/v1.12.0"
"go.opentelemetry.io/otel/trace" "go.opentelemetry.io/otel/trace"
"google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials"

View file

@@ -32,6 +32,7 @@ import (
"github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/model/exemplar"
"github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/metadata"
"github.com/prometheus/prometheus/model/timestamp" "github.com/prometheus/prometheus/model/timestamp"
"github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/storage/remote" "github.com/prometheus/prometheus/storage/remote"
@@ -814,7 +815,12 @@ func (a *appender) AppendExemplar(ref storage.SeriesRef, l labels.Labels, e exem
} }
func (a *appender) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t int64, h *histogram.Histogram) (storage.SeriesRef, error) { func (a *appender) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t int64, h *histogram.Histogram) (storage.SeriesRef, error) {
// remote_write doesn't support histograms yet, so do nothing here. // TODO: Add histogram support.
return 0, nil
}
func (a *appender) UpdateMetadata(ref storage.SeriesRef, l labels.Labels, m metadata.Metadata) (storage.SeriesRef, error) {
// TODO: Wire metadata in the Agent's appender.
return 0, nil return 0, nil
} }

View file

@@ -41,6 +41,7 @@ import (
"go.uber.org/goleak" "go.uber.org/goleak"
"github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/metadata"
"github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/tsdb/chunkenc"
"github.com/prometheus/prometheus/tsdb/chunks" "github.com/prometheus/prometheus/tsdb/chunks"
@@ -3490,3 +3491,254 @@ func TestDBPanicOnMmappingHeadChunk(t *testing.T) {
require.NoError(t, db.Close()) require.NoError(t, db.Close())
} }
func TestMetadataInWAL(t *testing.T) {
updateMetadata := func(t *testing.T, app storage.Appender, s labels.Labels, m metadata.Metadata) {
_, err := app.UpdateMetadata(0, s, m)
require.NoError(t, err)
}
db := newTestDB(t)
ctx := context.Background()
// Add some series so we can append metadata to them.
app := db.Appender(ctx)
s1 := labels.FromStrings("a", "b")
s2 := labels.FromStrings("c", "d")
s3 := labels.FromStrings("e", "f")
s4 := labels.FromStrings("g", "h")
for _, s := range []labels.Labels{s1, s2, s3, s4} {
_, err := app.Append(0, s, 0, 0)
require.NoError(t, err)
}
require.NoError(t, app.Commit())
// Add a first round of metadata to the first three series.
// Re-take the Appender, as the previous Commit will have closed it.
m1 := metadata.Metadata{Type: "gauge", Unit: "unit_1", Help: "help_1"}
m2 := metadata.Metadata{Type: "gauge", Unit: "unit_2", Help: "help_2"}
m3 := metadata.Metadata{Type: "gauge", Unit: "unit_3", Help: "help_3"}
app = db.Appender(ctx)
updateMetadata(t, app, s1, m1)
updateMetadata(t, app, s2, m2)
updateMetadata(t, app, s3, m3)
require.NoError(t, app.Commit())
// Add a replicated metadata entry to the first series,
// a completely new metadata entry for the fourth series,
// and a changed metadata entry to the second series.
m4 := metadata.Metadata{Type: "counter", Unit: "unit_4", Help: "help_4"}
m5 := metadata.Metadata{Type: "counter", Unit: "unit_5", Help: "help_5"}
app = db.Appender(ctx)
updateMetadata(t, app, s1, m1)
updateMetadata(t, app, s4, m4)
updateMetadata(t, app, s2, m5)
require.NoError(t, app.Commit())
// Read the WAL to see if the disk storage format is correct.
recs := readTestWAL(t, path.Join(db.Dir(), "wal"))
var gotMetadataBlocks [][]record.RefMetadata
for _, rec := range recs {
if mr, ok := rec.([]record.RefMetadata); ok {
gotMetadataBlocks = append(gotMetadataBlocks, mr)
}
}
expectedMetadata := []record.RefMetadata{
{Ref: 1, Type: record.GetMetricType(m1.Type), Unit: m1.Unit, Help: m1.Help},
{Ref: 2, Type: record.GetMetricType(m2.Type), Unit: m2.Unit, Help: m2.Help},
{Ref: 3, Type: record.GetMetricType(m3.Type), Unit: m3.Unit, Help: m3.Help},
{Ref: 4, Type: record.GetMetricType(m4.Type), Unit: m4.Unit, Help: m4.Help},
{Ref: 2, Type: record.GetMetricType(m5.Type), Unit: m5.Unit, Help: m5.Help},
}
require.Len(t, gotMetadataBlocks, 2)
require.Equal(t, expectedMetadata[:3], gotMetadataBlocks[0])
require.Equal(t, expectedMetadata[3:], gotMetadataBlocks[1])
}
func TestMetadataCheckpointingOnlyKeepsLatestEntry(t *testing.T) {
updateMetadata := func(t *testing.T, app storage.Appender, s labels.Labels, m metadata.Metadata) {
_, err := app.UpdateMetadata(0, s, m)
require.NoError(t, err)
}
ctx := context.Background()
numSamples := 10000
hb, w := newTestHead(t, int64(numSamples)*10, false)
// Add some series so we can append metadata to them.
app := hb.Appender(ctx)
s1 := labels.FromStrings("a", "b")
s2 := labels.FromStrings("c", "d")
s3 := labels.FromStrings("e", "f")
s4 := labels.FromStrings("g", "h")
for _, s := range []labels.Labels{s1, s2, s3, s4} {
_, err := app.Append(0, s, 0, 0)
require.NoError(t, err)
}
require.NoError(t, app.Commit())
// Add a first round of metadata to the first three series.
// Re-take the Appender, as the previous Commit will have closed it.
m1 := metadata.Metadata{Type: "gauge", Unit: "unit_1", Help: "help_1"}
m2 := metadata.Metadata{Type: "gauge", Unit: "unit_2", Help: "help_2"}
m3 := metadata.Metadata{Type: "gauge", Unit: "unit_3", Help: "help_3"}
m4 := metadata.Metadata{Type: "gauge", Unit: "unit_4", Help: "help_4"}
app = hb.Appender(ctx)
updateMetadata(t, app, s1, m1)
updateMetadata(t, app, s2, m2)
updateMetadata(t, app, s3, m3)
updateMetadata(t, app, s4, m4)
require.NoError(t, app.Commit())
// Update metadata for first series.
m5 := metadata.Metadata{Type: "counter", Unit: "unit_5", Help: "help_5"}
app = hb.Appender(ctx)
updateMetadata(t, app, s1, m5)
require.NoError(t, app.Commit())
// Switch the metadata back and forth for the second series.
// Since it ended on a new metadata record, we expect a single new entry.
m6 := metadata.Metadata{Type: "counter", Unit: "unit_6", Help: "help_6"}
app = hb.Appender(ctx)
updateMetadata(t, app, s2, m6)
require.NoError(t, app.Commit())
app = hb.Appender(ctx)
updateMetadata(t, app, s2, m2)
require.NoError(t, app.Commit())
app = hb.Appender(ctx)
updateMetadata(t, app, s2, m6)
require.NoError(t, app.Commit())
app = hb.Appender(ctx)
updateMetadata(t, app, s2, m2)
require.NoError(t, app.Commit())
app = hb.Appender(ctx)
updateMetadata(t, app, s2, m6)
require.NoError(t, app.Commit())
// Let's create a checkpoint.
first, last, err := wal.Segments(w.Dir())
require.NoError(t, err)
keep := func(id chunks.HeadSeriesRef) bool {
return id != 3
}
_, err = wal.Checkpoint(log.NewNopLogger(), w, first, last-1, keep, 0)
require.NoError(t, err)
// Confirm there's been a checkpoint.
cdir, _, err := wal.LastCheckpoint(w.Dir())
require.NoError(t, err)
// Read in checkpoint and WAL.
recs := readTestWAL(t, cdir)
var gotMetadataBlocks [][]record.RefMetadata
for _, rec := range recs {
if mr, ok := rec.([]record.RefMetadata); ok {
gotMetadataBlocks = append(gotMetadataBlocks, mr)
}
}
// There should only be 1 metadata block present, with only the latest
// metadata kept around.
wantMetadata := []record.RefMetadata{
{Ref: 1, Type: record.GetMetricType(m5.Type), Unit: m5.Unit, Help: m5.Help},
{Ref: 2, Type: record.GetMetricType(m6.Type), Unit: m6.Unit, Help: m6.Help},
{Ref: 4, Type: record.GetMetricType(m4.Type), Unit: m4.Unit, Help: m4.Help},
}
require.Len(t, gotMetadataBlocks, 1)
require.Len(t, gotMetadataBlocks[0], 3)
gotMetadataBlock := gotMetadataBlocks[0]
sort.Slice(gotMetadataBlock, func(i, j int) bool { return gotMetadataBlock[i].Ref < gotMetadataBlock[j].Ref })
require.Equal(t, wantMetadata, gotMetadataBlock)
require.NoError(t, hb.Close())
}
func TestMetadataAssertInMemoryData(t *testing.T) {
updateMetadata := func(t *testing.T, app storage.Appender, s labels.Labels, m metadata.Metadata) {
_, err := app.UpdateMetadata(0, s, m)
require.NoError(t, err)
}
db := openTestDB(t, nil, nil)
ctx := context.Background()
// Add some series so we can append metadata to them.
app := db.Appender(ctx)
s1 := labels.FromStrings("a", "b")
s2 := labels.FromStrings("c", "d")
s3 := labels.FromStrings("e", "f")
s4 := labels.FromStrings("g", "h")
for _, s := range []labels.Labels{s1, s2, s3, s4} {
_, err := app.Append(0, s, 0, 0)
require.NoError(t, err)
}
require.NoError(t, app.Commit())
// Add a first round of metadata to the first three series.
// The in-memory data held in the db Head should hold the metadata.
m1 := metadata.Metadata{Type: "gauge", Unit: "unit_1", Help: "help_1"}
m2 := metadata.Metadata{Type: "gauge", Unit: "unit_2", Help: "help_2"}
m3 := metadata.Metadata{Type: "gauge", Unit: "unit_3", Help: "help_3"}
app = db.Appender(ctx)
updateMetadata(t, app, s1, m1)
updateMetadata(t, app, s2, m2)
updateMetadata(t, app, s3, m3)
require.NoError(t, app.Commit())
series1 := db.head.series.getByHash(s1.Hash(), s1)
series2 := db.head.series.getByHash(s2.Hash(), s2)
series3 := db.head.series.getByHash(s3.Hash(), s3)
series4 := db.head.series.getByHash(s4.Hash(), s4)
require.Equal(t, series1.meta, m1)
require.Equal(t, series2.meta, m2)
require.Equal(t, series3.meta, m3)
require.Equal(t, series4.meta, metadata.Metadata{})
// Add a replicated metadata entry to the first series,
// a changed metadata entry to the second series,
// and a completely new metadata entry for the fourth series.
// The in-memory data held in the db Head should be correctly updated.
m4 := metadata.Metadata{Type: "counter", Unit: "unit_4", Help: "help_4"}
m5 := metadata.Metadata{Type: "counter", Unit: "unit_5", Help: "help_5"}
app = db.Appender(ctx)
updateMetadata(t, app, s1, m1)
updateMetadata(t, app, s4, m4)
updateMetadata(t, app, s2, m5)
require.NoError(t, app.Commit())
series1 = db.head.series.getByHash(s1.Hash(), s1)
series2 = db.head.series.getByHash(s2.Hash(), s2)
series3 = db.head.series.getByHash(s3.Hash(), s3)
series4 = db.head.series.getByHash(s4.Hash(), s4)
require.Equal(t, series1.meta, m1)
require.Equal(t, series2.meta, m5)
require.Equal(t, series3.meta, m3)
require.Equal(t, series4.meta, m4)
require.NoError(t, db.Close())
// Reopen the DB, replaying the WAL. The Head must have been replayed
// correctly in memory.
reopenDB, err := Open(db.Dir(), nil, nil, nil, nil)
require.NoError(t, err)
t.Cleanup(func() {
require.NoError(t, reopenDB.Close())
})
_, err = reopenDB.head.wal.Size()
require.NoError(t, err)
require.Equal(t, reopenDB.head.series.getByHash(s1.Hash(), s1).meta, m1)
require.Equal(t, reopenDB.head.series.getByHash(s2.Hash(), s2).meta, m5)
require.Equal(t, reopenDB.head.series.getByHash(s3.Hash(), s3).meta, m3)
require.Equal(t, reopenDB.head.series.getByHash(s4.Hash(), s4).meta, m4)
}

View file

@@ -126,7 +126,7 @@ After the labels, the number of indexed chunks is encoded, followed by a sequenc
### Label Index ### Label Index
A label index section indexes the existing (combined) values for one or more label names. A label index section indexes the existing (combined) values for one or more label names.
The `#names` field determines the number of indexed label names, followed by the total number of entries in the `#entries` field. The body holds #entries / #names tuples of symbol table references, each tuple being of #names length. The value tuples are sorted in lexicographically increasing order. This is no longer used. The `#names` field determines the number of indexed label names, followed by the total number of entries in the `#entries` field. The body holds `#entries / #names` tuples of symbol table references, each tuple being of #names length. The value tuples are sorted in lexicographically increasing order. This is no longer used.
``` ```
┌───────────────┬────────────────┬────────────────┐ ┌───────────────┬────────────────┬────────────────┐

View file

@@ -118,3 +118,33 @@ See: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/Op
│ . . . │ │ . . . │
└──────────────────────────────────────────────────────────────────┘ └──────────────────────────────────────────────────────────────────┘
``` ```
### Metadata records
Metadata records encode the metadata updates associated with a series.
```
┌────────────────────────────────────────────┐
│ type = 6 <1b>                              │
├────────────────────────────────────────────┤
│ ┌────────────────────────────────────────┐ │
│ │ series_id <uvarint>                    │ │
│ ├────────────────────────────────────────┤ │
│ │ metric_type <1b>                       │ │
│ ├────────────────────────────────────────┤ │
│ │ num_fields <uvarint>                   │ │
│ ├───────────────────────┬────────────────┤ │
│ │ len(name_1) <uvarint> │ name_1 <bytes> │ │
│ ├───────────────────────┼────────────────┤ │
│ │ len(val_1) <uvarint>  │ val_1 <bytes>  │ │
│ ├───────────────────────┴────────────────┤ │
│ │                . . .                   │ │
│ ├───────────────────────┬────────────────┤ │
│ │ len(name_n) <uvarint> │ name_n <bytes> │ │
│ ├───────────────────────┼────────────────┤ │
│ │ len(val_n) <uvarint>  │ val_n <bytes>  │ │
│ └───────────────────────┴────────────────┘ │
│                . . .                       │
└────────────────────────────────────────────┘
```
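For illustration, a stdlib-only sketch that serializes one record in the layout above. The `seriesMetadata` struct and helper names are made up for this sketch; the commit's real encoder and decoder live in `tsdb/record`:

```
package main

import (
	"encoding/binary"
	"fmt"
)

// seriesMetadata is an illustrative stand-in for one entry of a
// metadata record.
type seriesMetadata struct {
	SeriesID   uint64
	MetricType byte
	Fields     map[string]string // e.g. "unit" and "help"
}

// encodeMetadataRecord writes the documented layout: a type byte (6),
// then per series: series_id <uvarint>, metric_type <1b>,
// num_fields <uvarint>, and length-prefixed name/value pairs.
func encodeMetadataRecord(entries []seriesMetadata) []byte {
	buf := []byte{6}
	var tmp [binary.MaxVarintLen64]byte
	putUvarint := func(v uint64) {
		n := binary.PutUvarint(tmp[:], v)
		buf = append(buf, tmp[:n]...)
	}
	putStr := func(s string) {
		putUvarint(uint64(len(s)))
		buf = append(buf, s...)
	}
	for _, e := range entries {
		putUvarint(e.SeriesID)
		buf = append(buf, e.MetricType)
		putUvarint(uint64(len(e.Fields)))
		for name, val := range e.Fields {
			putStr(name)
			putStr(val)
		}
	}
	return buf
}

func main() {
	rec := encodeMetadataRecord([]seriesMetadata{{
		SeriesID:   1,
		MetricType: 2, // the numeric value here is illustrative
		Fields:     map[string]string{"unit": "seconds", "help": "Request duration."},
	}})
	fmt.Printf("encoded %d bytes\n", len(rec))
}
```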

View file

@@ -32,6 +32,7 @@ import (
"github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/model/exemplar"
"github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/metadata"
"github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/tsdb/chunkenc"
"github.com/prometheus/prometheus/tsdb/chunks" "github.com/prometheus/prometheus/tsdb/chunks"
@@ -77,6 +78,7 @@ type Head struct {
appendPool sync.Pool appendPool sync.Pool
exemplarsPool sync.Pool exemplarsPool sync.Pool
histogramsPool sync.Pool histogramsPool sync.Pool
metadataPool sync.Pool
seriesPool sync.Pool seriesPool sync.Pool
bytesPool sync.Pool bytesPool sync.Pool
memChunkPool sync.Pool memChunkPool sync.Pool
@@ -1547,6 +1549,7 @@ type memSeries struct {
ref chunks.HeadSeriesRef ref chunks.HeadSeriesRef
lset labels.Labels lset labels.Labels
meta metadata.Metadata
// Immutable chunks on disk that have not yet gone into a block, in order of ascending time stamps. // Immutable chunks on disk that have not yet gone into a block, in order of ascending time stamps.
// When compaction runs, chunks get moved into a block and all pointers are shifted like so: // When compaction runs, chunks get moved into a block and all pointers are shifted like so:

View file

@@ -24,6 +24,7 @@ import (
"github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/model/exemplar"
"github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/metadata"
"github.com/prometheus/prometheus/model/value" "github.com/prometheus/prometheus/model/value"
"github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/tsdb/chunkenc"
@@ -77,6 +78,15 @@ func (a *initAppender) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t
return a.app.AppendHistogram(ref, l, t, h) return a.app.AppendHistogram(ref, l, t, h)
} }
func (a *initAppender) UpdateMetadata(ref storage.SeriesRef, l labels.Labels, m metadata.Metadata) (storage.SeriesRef, error) {
if a.app != nil {
return a.app.UpdateMetadata(ref, l, m)
}
a.app = a.head.appender()
return a.app.UpdateMetadata(ref, l, m)
}
// initTime initializes a head with the first timestamp. This only needs to be called // initTime initializes a head with the first timestamp. This only needs to be called
// for a completely fresh head with an empty WAL. // for a completely fresh head with an empty WAL.
func (h *Head) initTime(t int64) { func (h *Head) initTime(t int64) {
@@ -143,6 +153,7 @@ func (h *Head) appender() *headAppender {
sampleSeries: h.getSeriesBuffer(), sampleSeries: h.getSeriesBuffer(),
exemplars: exemplarsBuf, exemplars: exemplarsBuf,
histograms: h.getHistogramBuffer(), histograms: h.getHistogramBuffer(),
metadata: h.getMetadataBuffer(),
appendID: appendID, appendID: appendID,
cleanupAppendIDsBelow: cleanupAppendIDsBelow, cleanupAppendIDsBelow: cleanupAppendIDsBelow,
} }
@@ -222,6 +233,19 @@ func (h *Head) putHistogramBuffer(b []record.RefHistogram) {
h.histogramsPool.Put(b[:0]) h.histogramsPool.Put(b[:0])
} }
func (h *Head) getMetadataBuffer() []record.RefMetadata {
b := h.metadataPool.Get()
if b == nil {
return make([]record.RefMetadata, 0, 512)
}
return b.([]record.RefMetadata)
}
func (h *Head) putMetadataBuffer(b []record.RefMetadata) {
//nolint:staticcheck // Ignore SA6002 safe to ignore and actually fixing it has some performance penalty.
h.metadataPool.Put(b[:0])
}
func (h *Head) getSeriesBuffer() []*memSeries { func (h *Head) getSeriesBuffer() []*memSeries {
b := h.seriesPool.Get() b := h.seriesPool.Get()
if b == nil { if b == nil {
@@ -264,6 +288,8 @@ type headAppender struct {
sampleSeries []*memSeries // Float series corresponding to the samples held by this appender (using corresponding slice indices - same series may appear more than once). sampleSeries []*memSeries // Float series corresponding to the samples held by this appender (using corresponding slice indices - same series may appear more than once).
histograms []record.RefHistogram // New histogram samples held by this appender. histograms []record.RefHistogram // New histogram samples held by this appender.
histogramSeries []*memSeries // Histogram series corresponding to the samples held by this appender (using corresponding slice indices - same series may appear more than once). histogramSeries []*memSeries // Histogram series corresponding to the samples held by this appender (using corresponding slice indices - same series may appear more than once).
metadata []record.RefMetadata // New metadata held by this appender.
metadataSeries []*memSeries // Series corresponding to the metadata held by this appender.
appendID, cleanupAppendIDsBelow uint64 appendID, cleanupAppendIDsBelow uint64
closed bool closed bool
@@ -476,6 +502,37 @@ func (a *headAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels
return storage.SeriesRef(s.ref), nil return storage.SeriesRef(s.ref), nil
} }
// UpdateMetadata for headAppender assumes the series ref already exists, and so it doesn't
// use getOrCreate or perform any of the lset sanity checks that Append does.
func (a *headAppender) UpdateMetadata(ref storage.SeriesRef, lset labels.Labels, meta metadata.Metadata) (storage.SeriesRef, error) {
s := a.head.series.getByID(chunks.HeadSeriesRef(ref))
if s == nil {
s = a.head.series.getByHash(lset.Hash(), lset)
if s != nil {
ref = storage.SeriesRef(s.ref)
}
}
if s == nil {
return 0, fmt.Errorf("unknown series when trying to add metadata with HeadSeriesRef: %d and labels: %s", ref, lset)
}
s.RLock()
hasNewMetadata := s.meta != meta
s.RUnlock()
if hasNewMetadata {
a.metadata = append(a.metadata, record.RefMetadata{
Ref: s.ref,
Type: record.GetMetricType(meta.Type),
Unit: meta.Unit,
Help: meta.Help,
})
a.metadataSeries = append(a.metadataSeries, s)
}
return ref, nil
}
func ValidateHistogram(h *histogram.Histogram) error { func ValidateHistogram(h *histogram.Histogram) error {
if err := checkHistogramSpans(h.NegativeSpans, len(h.NegativeBuckets)); err != nil { if err := checkHistogramSpans(h.NegativeSpans, len(h.NegativeBuckets)); err != nil {
return errors.Wrap(err, "negative side") return errors.Wrap(err, "negative side")
@@ -577,6 +634,14 @@ func (a *headAppender) log() error {
return errors.Wrap(err, "log series") return errors.Wrap(err, "log series")
} }
} }
if len(a.metadata) > 0 {
rec = enc.Metadata(a.metadata, buf)
buf = rec[:0]
if err := a.head.wal.Log(rec); err != nil {
return errors.Wrap(err, "log metadata")
}
}
if len(a.samples) > 0 { if len(a.samples) > 0 {
rec = enc.Samples(a.samples, buf) rec = enc.Samples(a.samples, buf)
buf = rec[:0] buf = rec[:0]
@@ -645,6 +710,7 @@ func (a *headAppender) Commit() (err error) {
defer a.head.putSeriesBuffer(a.sampleSeries) defer a.head.putSeriesBuffer(a.sampleSeries)
defer a.head.putExemplarBuffer(a.exemplars) defer a.head.putExemplarBuffer(a.exemplars)
defer a.head.putHistogramBuffer(a.histograms) defer a.head.putHistogramBuffer(a.histograms)
defer a.head.putMetadataBuffer(a.metadata)
defer a.head.iso.closeAppend(a.appendID) defer a.head.iso.closeAppend(a.appendID)
total := len(a.samples) total := len(a.samples)
@@ -688,6 +754,13 @@ func (a *headAppender) Commit() (err error) {
} }
} }
for i, m := range a.metadata {
series = a.metadataSeries[i]
series.Lock()
series.meta = metadata.Metadata{Type: record.ToTextparseMetricType(m.Type), Unit: m.Unit, Help: m.Help}
series.Unlock()
}
a.head.metrics.samplesAppended.Add(float64(total)) a.head.metrics.samplesAppended.Add(float64(total))
a.head.updateMinMaxTime(a.mint, a.maxt) a.head.updateMinMaxTime(a.mint, a.maxt)
@@ -948,9 +1021,11 @@ func (a *headAppender) Rollback() (err error) {
a.head.putAppendBuffer(a.samples) a.head.putAppendBuffer(a.samples)
a.head.putExemplarBuffer(a.exemplars) a.head.putExemplarBuffer(a.exemplars)
a.head.putHistogramBuffer(a.histograms) a.head.putHistogramBuffer(a.histograms)
a.head.putMetadataBuffer(a.metadata)
a.samples = nil a.samples = nil
a.exemplars = nil a.exemplars = nil
a.histograms = nil a.histograms = nil
a.metadata = nil
// Series are created in the head memory regardless of rollback. Thus we have // Series are created in the head memory regardless of rollback. Thus we have
// to log them to the WAL in any case. // to log them to the WAL in any case.

View file

@@ -125,6 +125,10 @@ func readTestWAL(t testing.TB, dir string) (recs []interface{}) {
tstones, err := dec.Tombstones(rec, nil) tstones, err := dec.Tombstones(rec, nil)
require.NoError(t, err) require.NoError(t, err)
recs = append(recs, tstones) recs = append(recs, tstones)
case record.Metadata:
meta, err := dec.Metadata(rec, nil)
require.NoError(t, err)
recs = append(recs, meta)
default: default:
t.Fatalf("unknown record type") t.Fatalf("unknown record type")
} }
@@ -1006,7 +1010,7 @@ func TestDeletedSamplesAndSeriesStillInWALAfterCheckpoint(t *testing.T) {
recs := readTestWAL(t, cdir) recs := readTestWAL(t, cdir)
recs = append(recs, readTestWAL(t, w.Dir())...) recs = append(recs, readTestWAL(t, w.Dir())...)
var series, samples, stones int var series, samples, stones, metadata int
for _, rec := range recs { for _, rec := range recs {
switch rec.(type) { switch rec.(type) {
case []record.RefSeries: case []record.RefSeries:
@@ -1015,6 +1019,8 @@ func TestDeletedSamplesAndSeriesStillInWALAfterCheckpoint(t *testing.T) {
samples++ samples++
case []tombstones.Stone: case []tombstones.Stone:
stones++ stones++
case []record.RefMetadata:
metadata++
default: default:
t.Fatalf("unknown record type") t.Fatalf("unknown record type")
} }
@@ -1022,6 +1028,7 @@ func TestDeletedSamplesAndSeriesStillInWALAfterCheckpoint(t *testing.T) {
require.Equal(t, 1, series) require.Equal(t, 1, series)
require.Equal(t, 9999, samples) require.Equal(t, 9999, samples)
require.Equal(t, 1, stones) require.Equal(t, 1, stones)
require.Equal(t, 0, metadata)
} }
func TestDelete_e2e(t *testing.T) { func TestDelete_e2e(t *testing.T) {

View file

@@ -30,6 +30,7 @@ import (
"github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/model/exemplar"
"github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/metadata"
"github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/tsdb/chunkenc"
"github.com/prometheus/prometheus/tsdb/chunks" "github.com/prometheus/prometheus/tsdb/chunks"
@@ -42,13 +43,12 @@ import (
) )
func (h *Head) loadWAL(r *wal.Reader, multiRef map[chunks.HeadSeriesRef]chunks.HeadSeriesRef, mmappedChunks map[chunks.HeadSeriesRef][]*mmappedChunk) (err error) { func (h *Head) loadWAL(r *wal.Reader, multiRef map[chunks.HeadSeriesRef]chunks.HeadSeriesRef, mmappedChunks map[chunks.HeadSeriesRef][]*mmappedChunk) (err error) {
// Track number of samples that referenced a series we don't know about var (
// for error reporting. // Track number of samples that referenced a series we don't know about for error reporting.
var unknownRefs atomic.Uint64 unknownRefs, unknownExemplarRefs, unknownHistogramRefs, unknownMetadataRefs atomic.Uint64
var unknownExemplarRefs atomic.Uint64 // Track number of series records that had overlapping m-map chunks.
var unknownHistogramRefs atomic.Uint64 mmapOverlappingChunks uint64
// Track number of series records that had overlapping m-map chunks. )
var mmapOverlappingChunks uint64
// Start workers that each process samples for a partition of the series ID space. // Start workers that each process samples for a partition of the series ID space.
var ( var (
@@ -88,6 +88,11 @@ func (h *Head) loadWAL(r *wal.Reader, multiRef map[chunks.HeadSeriesRef]chunks.H
return []record.RefHistogram{} return []record.RefHistogram{}
}, },
} }
metadataPool = sync.Pool{
New: func() interface{} {
return []record.RefMetadata{}
},
}
) )
defer func() { defer func() {
@@ -204,6 +209,18 @@ func (h *Head) loadWAL(r *wal.Reader, multiRef map[chunks.HeadSeriesRef]chunks.H
return return
} }
decoded <- hists decoded <- hists
case record.Metadata:
meta := metadataPool.Get().([]record.RefMetadata)[:0]
meta, err := dec.Metadata(rec, meta)
if err != nil {
decodeErr = &wal.CorruptionErr{
Err: errors.Wrap(err, "decode metadata"),
Segment: r.Segment(),
Offset: r.Offset(),
}
return
}
decoded <- meta
default:
// Noop.
}
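
The decode goroutine above borrows its scratch slices from sync.Pools so that replaying a long WAL does not allocate a fresh slice per record. A minimal, self-contained sketch of that get/truncate/put pattern (refMetadata is an illustrative type, not the actual Head internals):

package main

import (
	"fmt"
	"sync"
)

type refMetadata struct{ ref uint64 }

var pool = sync.Pool{
	New: func() interface{} { return []refMetadata{} },
}

func main() {
	// Get a pooled slice and truncate it to zero length, keeping its capacity.
	buf := pool.Get().([]refMetadata)[:0]
	buf = append(buf, refMetadata{ref: 1})
	fmt.Println(len(buf), cap(buf) >= 1)
	// Hand the backing array back for reuse once processing is done.
	//nolint:staticcheck // SA6002: putting a slice value is intentional here.
	pool.Put(buf)
}
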
@@ -355,6 +372,21 @@ Outer:
}
//nolint:staticcheck // Ignore SA6002 relax staticcheck verification.
histogramsPool.Put(v)
case []record.RefMetadata:
for _, m := range v {
s := h.series.getByID(chunks.HeadSeriesRef(m.Ref))
if s == nil {
unknownMetadataRefs.Inc()
continue
}
s.meta = metadata.Metadata{
Type: record.ToTextparseMetricType(m.Type),
Unit: m.Unit,
Help: m.Help,
}
}
//nolint:staticcheck // Ignore SA6002 relax staticcheck verification.
metadataPool.Put(v)
default:
panic(fmt.Errorf("unexpected decoded type: %T", d))
}
@@ -381,8 +413,14 @@ Outer:
return errors.Wrap(r.Err(), "read records")
}
if unknownRefs.Load()+unknownExemplarRefs.Load()+unknownHistogramRefs.Load()+unknownMetadataRefs.Load() > 0 {
level.Warn(h.logger).Log(
"msg", "Unknown series references",
"samples", unknownRefs.Load(),
"exemplars", unknownExemplarRefs.Load(),
"histograms", unknownHistogramRefs.Load(),
"metadata", unknownMetadataRefs.Load(),
)
}
if mmapOverlappingChunks > 0 {
level.Info(h.logger).Log("msg", "Overlapping m-map chunks on duplicate series records", "count", mmapOverlappingChunks)
@@ -567,11 +605,7 @@ func (s *memSeries) encodeToSnapshotRecord(b []byte) []byte {
buf.PutByte(chunkSnapshotRecordTypeSeries)
buf.PutBE64(uint64(s.ref))
record.EncodeLabels(&buf, s.lset)
buf.PutBE64int64(s.chunkRange)

s.Lock()
@@ -594,7 +628,7 @@ func (s *memSeries) encodeToSnapshotRecord(b []byte) []byte {
return buf.Get()
}

func decodeSeriesFromChunkSnapshot(d *record.Decoder, b []byte) (csr chunkSnapshotRecord, err error) {
dec := encoding.Decbuf{B: b}
if flag := dec.Byte(); flag != chunkSnapshotRecordTypeSeries {
@@ -602,13 +636,9 @@ func decodeSeriesFromChunkSnapshot(b []byte) (csr chunkSnapshotRecord, err error
}
csr.ref = chunks.HeadSeriesRef(dec.Be64())

// The label set written to the disk is already sorted.
// TODO: figure out why DecodeLabels calls Sort(), and perhaps remove it.
csr.lset = d.DecodeLabels(&dec)
csr.chunkRange = dec.Be64int64()

if dec.Uvarint() == 0 {
@@ -998,8 +1028,12 @@ func (h *Head) loadChunkSnapshot() (int, int, map[chunks.HeadSeriesRef]*memSerie
return
}
localRefSeries[csr.ref] = series
for {
seriesID := uint64(series.ref)
lastSeriesID := h.lastSeriesID.Load()
if lastSeriesID >= seriesID || h.lastSeriesID.CAS(lastSeriesID, seriesID) {
break
}
}

series.chunkRange = csr.chunkRange
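
The for/CAS loop above replaces a plain load-then-store, which could race when snapshot series are restored concurrently: it only ever raises lastSeriesID, and retries if another goroutine advanced it in between. A hedged sketch of the same store-maximum pattern with go.uber.org/atomic (storeMax is an illustrative name):

package main

import (
	"fmt"

	"go.uber.org/atomic"
)

// storeMax raises cur to v unless another goroutine already stored
// something at least as large; it never lowers the value.
func storeMax(cur *atomic.Uint64, v uint64) {
	for {
		old := cur.Load()
		if old >= v || cur.CAS(old, v) {
			return
		}
	}
}

func main() {
	var lastSeriesID atomic.Uint64
	storeMax(&lastSeriesID, 42)
	storeMax(&lastSeriesID, 7)       // no effect, 42 is already larger
	fmt.Println(lastSeriesID.Load()) // 42
}
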
@@ -1040,7 +1074,7 @@ Outer:
switch rec[0] {
case chunkSnapshotRecordTypeSeries:
numSeries++
csr, err := decodeSeriesFromChunkSnapshot(&dec, rec)
if err != nil {
loopErr = errors.Wrap(err, "decode series record")
break Outer

@@ -23,6 +23,7 @@ import (
"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/textparse"
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/tsdb/chunks"
"github.com/prometheus/prometheus/tsdb/encoding"
@@ -45,6 +46,88 @@ const (
Exemplars Type = 4
// Histograms is used to match WAL records of type Histograms.
Histograms Type = 5
// Metadata is used to match WAL records of type Metadata.
Metadata Type = 6
)
func (rt Type) String() string {
switch rt {
case Series:
return "series"
case Samples:
return "samples"
case Tombstones:
return "tombstones"
case Exemplars:
return "exemplars"
case Histograms:
return "histograms"
case Metadata:
return "metadata"
default:
return "unknown"
}
}
// MetricType represents the type of a series.
type MetricType uint8
const (
UnknownMT MetricType = 0
Counter MetricType = 1
Gauge MetricType = 2
Histogram MetricType = 3
GaugeHistogram MetricType = 4
Summary MetricType = 5
Info MetricType = 6
Stateset MetricType = 7
)
func GetMetricType(t textparse.MetricType) uint8 {
switch t {
case textparse.MetricTypeCounter:
return uint8(Counter)
case textparse.MetricTypeGauge:
return uint8(Gauge)
case textparse.MetricTypeHistogram:
return uint8(Histogram)
case textparse.MetricTypeGaugeHistogram:
return uint8(GaugeHistogram)
case textparse.MetricTypeSummary:
return uint8(Summary)
case textparse.MetricTypeInfo:
return uint8(Info)
case textparse.MetricTypeStateset:
return uint8(Stateset)
default:
return uint8(UnknownMT)
}
}
func ToTextparseMetricType(m uint8) textparse.MetricType {
switch m {
case uint8(Counter):
return textparse.MetricTypeCounter
case uint8(Gauge):
return textparse.MetricTypeGauge
case uint8(Histogram):
return textparse.MetricTypeHistogram
case uint8(GaugeHistogram):
return textparse.MetricTypeGaugeHistogram
case uint8(Summary):
return textparse.MetricTypeSummary
case uint8(Info):
return textparse.MetricTypeInfo
case uint8(Stateset):
return textparse.MetricTypeStateset
default:
return textparse.MetricTypeUnknown
}
}
const (
unitMetaName = "UNIT"
helpMetaName = "HELP"
)
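
GetMetricType and ToTextparseMetricType above are intended as inverses, mapping between textparse.MetricType strings and the compact byte stored in WAL metadata records. A small sketch of the round trip, assuming this commit's record package:

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/textparse"
	"github.com/prometheus/prometheus/tsdb/record"
)

func main() {
	// Encode a textparse type to the single byte stored in the WAL...
	b := record.GetMetricType(textparse.MetricTypeCounter)
	// ...and map it back when replaying the record.
	fmt.Println(b, record.ToTextparseMetricType(b)) // 1 counter
}
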
// ErrNotFound is returned if a looked up resource was not found. Duplicate ErrNotFound from head.go.
@@ -64,6 +147,14 @@ type RefSample struct {
V float64
}
// RefMetadata is the metadata associated with a series ID.
type RefMetadata struct {
Ref chunks.HeadSeriesRef
Type uint8
Unit string
Help string
}
// RefExemplar is an exemplar with its labels, timestamp, value the exemplar was collected/observed with, and a reference to a series.
type RefExemplar struct {
Ref chunks.HeadSeriesRef
@@ -79,7 +170,7 @@ type RefHistogram struct {
H *histogram.Histogram
}
// Decoder decodes series, sample, metadata, and tombstone records.
// The zero value is ready to use.
type Decoder struct{}

@@ -90,7 +181,7 @@ func (d *Decoder) Type(rec []byte) Type {
return Unknown
}
switch t := Type(rec[0]); t {
case Series, Samples, Tombstones, Exemplars, Histograms, Metadata:
return t
}
return Unknown
@@ -105,14 +196,7 @@ func (d *Decoder) Series(rec []byte, series []RefSeries) ([]RefSeries, error) {
}
for len(dec.B) > 0 && dec.Err() == nil {
ref := storage.SeriesRef(dec.Be64())
lset := d.DecodeLabels(&dec)
series = append(series, RefSeries{
Ref: chunks.HeadSeriesRef(ref),
@@ -128,6 +212,61 @@ func (d *Decoder) Series(rec []byte, series []RefSeries) ([]RefSeries, error) {
return series, nil
}
// Metadata appends metadata in rec to the given slice.
func (d *Decoder) Metadata(rec []byte, metadata []RefMetadata) ([]RefMetadata, error) {
dec := encoding.Decbuf{B: rec}
if Type(dec.Byte()) != Metadata {
return nil, errors.New("invalid record type")
}
for len(dec.B) > 0 && dec.Err() == nil {
ref := dec.Uvarint64()
typ := dec.Byte()
numFields := dec.Uvarint()
// We're currently aware of two more metadata fields other than TYPE; that is UNIT and HELP.
// We can skip the rest of the fields (if we encounter any), but we must decode them anyway
// so we can correctly align with the start of the next metadata record.
var unit, help string
for i := 0; i < numFields; i++ {
fieldName := dec.UvarintStr()
fieldValue := dec.UvarintStr()
switch fieldName {
case unitMetaName:
unit = fieldValue
case helpMetaName:
help = fieldValue
}
}
metadata = append(metadata, RefMetadata{
Ref: chunks.HeadSeriesRef(ref),
Type: typ,
Unit: unit,
Help: help,
})
}
if dec.Err() != nil {
return nil, dec.Err()
}
if len(dec.B) > 0 {
return nil, errors.Errorf("unexpected %d bytes left in entry", len(dec.B))
}
return metadata, nil
}
// DecodeLabels decodes one set of labels from buf.
func (d *Decoder) DecodeLabels(dec *encoding.Decbuf) labels.Labels {
lset := make(labels.Labels, dec.Uvarint())
for i := range lset {
lset[i].Name = dec.UvarintStr()
lset[i].Value = dec.UvarintStr()
}
sort.Sort(lset)
return lset
}
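
The label wire format is a uvarint pair count followed by alternating name and value strings; DecodeLabels re-sorts the result. A hedged round-trip sketch using this commit's helpers (EncodeLabels is the encoder-side counterpart introduced further down in this file):

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/tsdb/encoding"
	"github.com/prometheus/prometheus/tsdb/record"
)

func main() {
	lbls := labels.FromStrings("__name__", "http_requests_total", "job", "api")

	// Encode: uvarint label count, then alternating name/value strings.
	var enc encoding.Encbuf
	record.EncodeLabels(&enc, lbls)

	// Decode from the same bytes; DecodeLabels sorts the set again.
	dec := encoding.Decbuf{B: enc.Get()}
	var d record.Decoder
	fmt.Println(d.DecodeLabels(&dec))
}
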
// Samples appends samples in rec to the given slice.
func (d *Decoder) Samples(rec []byte, samples []RefSample) ([]RefSample, error) {
dec := encoding.Decbuf{B: rec}
@@ -209,13 +348,7 @@ func (d *Decoder) ExemplarsFromBuffer(dec *encoding.Decbuf, exemplars []RefExemp
dref := dec.Varint64()
dtime := dec.Varint64()
val := dec.Be64()
lset := d.DecodeLabels(dec)
exemplars = append(exemplars, RefExemplar{
Ref: chunks.HeadSeriesRef(baseRef + uint64(dref)),
@@ -327,16 +460,41 @@ func (e *Encoder) Series(series []RefSeries, b []byte) []byte {
for _, s := range series {
buf.PutBE64(uint64(s.Ref))
EncodeLabels(&buf, s.Labels)
}
return buf.Get()
}
// Metadata appends the encoded metadata to b and returns the resulting slice.
func (e *Encoder) Metadata(metadata []RefMetadata, b []byte) []byte {
buf := encoding.Encbuf{B: b}
buf.PutByte(byte(Metadata))
for _, m := range metadata {
buf.PutUvarint64(uint64(m.Ref))
buf.PutByte(m.Type)
buf.PutUvarint(2) // num_fields: We currently have two more metadata fields, UNIT and HELP.
buf.PutUvarintStr(unitMetaName)
buf.PutUvarintStr(m.Unit)
buf.PutUvarintStr(helpMetaName)
buf.PutUvarintStr(m.Help)
}
return buf.Get()
}
// EncodeLabels encodes the contents of labels into buf.
func EncodeLabels(buf *encoding.Encbuf, lbls labels.Labels) {
buf.PutUvarint(len(lbls))
for _, l := range lbls {
buf.PutUvarintStr(l.Name)
buf.PutUvarintStr(l.Value)
}
}
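
On the wire, each entry written by Encoder.Metadata is a uvarint series ref, one metric-type byte, a uvarint field count, and then name/value string pairs (currently always UNIT and HELP); older readers can skip fields they do not know because the count is explicit. A hedged round-trip sketch against this commit's API:

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/tsdb/record"
)

func main() {
	var (
		enc record.Encoder
		dec record.Decoder
	)
	in := []record.RefMetadata{
		{Ref: 256, Type: uint8(record.Counter), Unit: "seconds", Help: "CPU time"},
	}
	rec := enc.Metadata(in, nil)
	// rec[0] is the record type byte; the decoder checks it before reading entries.
	fmt.Println(dec.Type(rec)) // metadata
	out, err := dec.Metadata(rec, nil)
	fmt.Println(out, err)
}
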
// Samples appends the encoded samples to b and returns the resulting slice.
func (e *Encoder) Samples(samples []RefSample, b []byte) []byte {
buf := encoding.Encbuf{B: b}
@@ -401,12 +559,7 @@ func (e *Encoder) EncodeExemplarsIntoBuffer(exemplars []RefExemplar, buf *encodi
buf.PutVarint64(int64(ex.Ref) - int64(first.Ref))
buf.PutVarint64(ex.T - first.T)
buf.PutBE64(math.Float64bits(ex.V))
EncodeLabels(buf, ex.Labels)
}
}

@@ -45,6 +45,30 @@ func TestRecord_EncodeDecode(t *testing.T) {
require.NoError(t, err)
require.Equal(t, series, decSeries)
metadata := []RefMetadata{
{
Ref: 100,
Type: uint8(Counter),
Unit: "",
Help: "some magic counter",
},
{
Ref: 1,
Type: uint8(Counter),
Unit: "seconds",
Help: "CPU time counter",
},
{
Ref: 147741,
Type: uint8(Gauge),
Unit: "percentage",
Help: "current memory usage",
},
}
decMetadata, err := dec.Metadata(enc.Metadata(metadata, nil), nil)
require.NoError(t, err)
require.Equal(t, metadata, decMetadata)
samples := []RefSample{
{Ref: 0, T: 12423423, V: 1.2345},
{Ref: 123, T: -1231, V: -123},
@@ -136,6 +160,16 @@ func TestRecord_Corrupted(t *testing.T) {
_, err := dec.Exemplars(corrupted, nil)
require.Equal(t, errors.Cause(err), encoding.ErrInvalidSize)
})
t.Run("Test corrupted metadata record", func(t *testing.T) {
meta := []RefMetadata{
{Ref: 147, Type: uint8(Counter), Unit: "unit", Help: "help"},
}
corrupted := enc.Metadata(meta, nil)[:8]
_, err := dec.Metadata(corrupted, nil)
require.Equal(t, errors.Cause(err), encoding.ErrInvalidSize)
})
}

func TestRecord_Type(t *testing.T) {
@@ -154,9 +188,80 @@ func TestRecord_Type(t *testing.T) {
recordType = dec.Type(enc.Tombstones(tstones, nil))
require.Equal(t, Tombstones, recordType)
metadata := []RefMetadata{{Ref: 147, Type: uint8(Counter), Unit: "unit", Help: "help"}}
recordType = dec.Type(enc.Metadata(metadata, nil))
require.Equal(t, Metadata, recordType)
recordType = dec.Type(nil)
require.Equal(t, Unknown, recordType)

recordType = dec.Type([]byte{0})
require.Equal(t, Unknown, recordType)
}
func TestRecord_MetadataDecodeUnknownExtraFields(t *testing.T) {
var enc encoding.Encbuf
var dec Decoder
// Write record type.
enc.PutByte(byte(Metadata))
// Write first metadata entry, all known fields.
enc.PutUvarint64(101)
enc.PutByte(byte(Counter))
enc.PutUvarint(2)
enc.PutUvarintStr(unitMetaName)
enc.PutUvarintStr("")
enc.PutUvarintStr(helpMetaName)
enc.PutUvarintStr("some magic counter")
// Write second metadata entry, known fields + unknown fields.
enc.PutUvarint64(99)
enc.PutByte(byte(Counter))
enc.PutUvarint(3)
// Known fields.
enc.PutUvarintStr(unitMetaName)
enc.PutUvarintStr("seconds")
enc.PutUvarintStr(helpMetaName)
enc.PutUvarintStr("CPU time counter")
// Unknown fields.
enc.PutUvarintStr("an extra field name to be skipped")
enc.PutUvarintStr("with its value")
// Write third metadata entry, with unknown fields and different order.
enc.PutUvarint64(47250)
enc.PutByte(byte(Gauge))
enc.PutUvarint(4)
enc.PutUvarintStr("extra name one")
enc.PutUvarintStr("extra value one")
enc.PutUvarintStr(helpMetaName)
enc.PutUvarintStr("current memory usage")
enc.PutUvarintStr("extra name two")
enc.PutUvarintStr("extra value two")
enc.PutUvarintStr(unitMetaName)
enc.PutUvarintStr("percentage")
// Should yield known fields for all entries and skip over unknown fields.
expectedMetadata := []RefMetadata{
{
Ref: 101,
Type: uint8(Counter),
Unit: "",
Help: "some magic counter",
}, {
Ref: 99,
Type: uint8(Counter),
Unit: "seconds",
Help: "CPU time counter",
}, {
Ref: 47250,
Type: uint8(Gauge),
Unit: "percentage",
Help: "current memory usage",
},
}
decMetadata, err := dec.Metadata(enc.Get(), nil)
require.NoError(t, err)
require.Equal(t, expectedMetadata, decMetadata)
}

@@ -23,7 +23,6 @@ import (
"math"
"os"
"path/filepath"
"sync"
"time"
@@ -32,7 +31,6 @@ import (
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/tsdb/chunks"
"github.com/prometheus/prometheus/tsdb/encoding"
@@ -790,12 +788,7 @@ const (
func (w *SegmentWAL) encodeSeries(buf *encoding.Encbuf, series []record.RefSeries) uint8 {
for _, s := range series {
buf.PutBE64(uint64(s.Ref))
record.EncodeLabels(buf, s.Labels)
}
return walSeriesSimple
}
@@ -840,6 +833,7 @@ type walReader struct {
cur int
buf []byte
crc32 hash.Hash32
dec record.Decoder
curType WALEntryType
curFlag byte
@@ -1123,14 +1117,7 @@ func (r *walReader) decodeSeries(flag byte, b []byte, res *[]record.RefSeries) e
for len(dec.B) > 0 && dec.Err() == nil {
ref := chunks.HeadSeriesRef(dec.Be64())
lset := r.dec.DecodeLabels(&dec)
*res = append(*res, record.RefSeries{
Ref: ref,

@@ -41,10 +41,12 @@ type CheckpointStats struct {
DroppedSamples int
DroppedTombstones int
DroppedExemplars int
DroppedMetadata int
TotalSeries int // Processed series including dropped ones.
TotalSamples int // Processed samples including dropped ones.
TotalTombstones int // Processed tombstones including dropped ones.
TotalExemplars int // Processed exemplars including dropped ones.
TotalMetadata int // Processed metadata including dropped ones.
}

// LastCheckpoint returns the directory name and index of the most recent checkpoint.
@@ -84,7 +86,8 @@ const checkpointPrefix = "checkpoint."
// Checkpoint creates a compacted checkpoint of segments in range [from, to] in the given WAL.
// It includes the most recent checkpoint if it exists.
// All series not satisfying keep, samples/tombstones/exemplars below mint and
// metadata that are not the latest are dropped.
//
// The checkpoint is stored in a directory named checkpoint.N in the same
// segmented format as the original WAL itself.
@@ -149,13 +152,16 @@ func Checkpoint(logger log.Logger, w *WAL, from, to int, keep func(id chunks.Hea
samples []record.RefSample
tstones []tombstones.Stone
exemplars []record.RefExemplar
metadata []record.RefMetadata
dec record.Decoder
enc record.Encoder
buf []byte
recs [][]byte
latestMetadataMap = make(map[chunks.HeadSeriesRef]record.RefMetadata)
)

for r.Next() {
series, samples, tstones, exemplars, metadata = series[:0], samples[:0], tstones[:0], exemplars[:0], metadata[:0]

// We don't reset the buffer since we batch up multiple records
// before writing them to the checkpoint.
@@ -238,6 +244,23 @@ func Checkpoint(logger log.Logger, w *WAL, from, to int, keep func(id chunks.Hea
}
stats.TotalExemplars += len(exemplars)
stats.DroppedExemplars += len(exemplars) - len(repl)
case record.Metadata:
metadata, err := dec.Metadata(rec, metadata)
if err != nil {
return nil, errors.Wrap(err, "decode metadata")
}
// Only keep reference to the latest found metadata for each refID.
repl := 0
for _, m := range metadata {
if keep(m.Ref) {
if _, ok := latestMetadataMap[m.Ref]; !ok {
repl++
}
latestMetadataMap[m.Ref] = m
}
}
stats.TotalMetadata += len(metadata)
stats.DroppedMetadata += len(metadata) - repl
default:
// Unknown record type, probably from a future Prometheus version.
continue
@@ -265,6 +288,18 @@ func Checkpoint(logger log.Logger, w *WAL, from, to int, keep func(id chunks.Hea
if err := cp.Log(recs...); err != nil {
return nil, errors.Wrap(err, "flush records")
}
// Flush latest metadata records for each series.
if len(latestMetadataMap) > 0 {
latestMetadata := make([]record.RefMetadata, 0, len(latestMetadataMap))
for _, m := range latestMetadataMap {
latestMetadata = append(latestMetadata, m)
}
if err := cp.Log(enc.Metadata(latestMetadata, buf[:0])); err != nil {
return nil, errors.Wrap(err, "flush metadata records")
}
}
if err := cp.Close(); err != nil {
return nil, errors.Wrap(err, "close checkpoint")
}
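
Since only the newest metadata per series is meaningful, the checkpoint reduces the stream through a map keyed by series ref, letting later records overwrite earlier ones, and flushes the surviving entries as one record at the end. The same last-writer-wins reduction in isolation (refMeta is an illustrative type):

package main

import "fmt"

type refMeta struct {
	ref  uint64
	help string
}

func main() {
	stream := []refMeta{
		{1, "v1"}, {2, "v1"}, {1, "v2"}, {1, "v3"}, // records in WAL order
	}
	latest := map[uint64]refMeta{}
	for _, m := range stream {
		latest[m.ref] = m // later records overwrite earlier ones
	}
	fmt.Println(latest[1].help, len(latest)) // v3 2
}
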

@@ -18,6 +18,7 @@ import (
"fmt"
"os"
"path/filepath"
"sort"
"strings" "strings"
"testing" "testing"
@ -153,6 +154,14 @@ func TestCheckpoint(t *testing.T) {
{Ref: 5, Labels: labels.FromStrings("a", "b", "c", "5")}, {Ref: 5, Labels: labels.FromStrings("a", "b", "c", "5")},
}, nil) }, nil)
require.NoError(t, w.Log(b)) require.NoError(t, w.Log(b))
b = enc.Metadata([]record.RefMetadata{
{Ref: 2, Unit: "unit", Help: "help"},
{Ref: 3, Unit: "unit", Help: "help"},
{Ref: 4, Unit: "unit", Help: "help"},
{Ref: 5, Unit: "unit", Help: "help"},
}, nil)
require.NoError(t, w.Log(b))
}

// Write samples until the WAL has enough segments.
// Make them have drifting timestamps within a record to see that they
@@ -170,6 +179,16 @@ func TestCheckpoint(t *testing.T) {
}, nil)
require.NoError(t, w.Log(b))
// Write changing metadata for each series. In the end, only the latest
// version should end up in the checkpoint.
b = enc.Metadata([]record.RefMetadata{
{Ref: 0, Unit: fmt.Sprintf("%d", last), Help: fmt.Sprintf("%d", last)},
{Ref: 1, Unit: fmt.Sprintf("%d", last), Help: fmt.Sprintf("%d", last)},
{Ref: 2, Unit: fmt.Sprintf("%d", last), Help: fmt.Sprintf("%d", last)},
{Ref: 3, Unit: fmt.Sprintf("%d", last), Help: fmt.Sprintf("%d", last)},
}, nil)
require.NoError(t, w.Log(b))
last += 100
}
require.NoError(t, w.Close())
@@ -193,6 +212,7 @@ func TestCheckpoint(t *testing.T) {
var dec record.Decoder
var series []record.RefSeries
var metadata []record.RefMetadata
r := NewReader(sr)
for r.Next() {
@@ -214,14 +234,27 @@ func TestCheckpoint(t *testing.T) {
for _, e := range exemplars {
require.GreaterOrEqual(t, e.T, last/2, "exemplar with wrong timestamp")
}
case record.Metadata:
metadata, err = dec.Metadata(rec, metadata)
require.NoError(t, err)
}
}
require.NoError(t, r.Err())

expectedRefSeries := []record.RefSeries{
{Ref: 0, Labels: labels.FromStrings("a", "b", "c", "0")},
{Ref: 2, Labels: labels.FromStrings("a", "b", "c", "2")},
{Ref: 4, Labels: labels.FromStrings("a", "b", "c", "4")},
}
require.Equal(t, expectedRefSeries, series)
expectedRefMetadata := []record.RefMetadata{
{Ref: 0, Unit: fmt.Sprintf("%d", last-100), Help: fmt.Sprintf("%d", last-100)},
{Ref: 2, Unit: fmt.Sprintf("%d", last-100), Help: fmt.Sprintf("%d", last-100)},
{Ref: 4, Unit: "unit", Help: "help"},
}
sort.Slice(metadata, func(i, j int) bool { return metadata[i].Ref < metadata[j].Ref })
require.Equal(t, expectedRefMetadata, metadata)
})
}
}

@@ -486,7 +486,7 @@ func (w *Watcher) readSegment(r *LiveReader, segmentNum int, tail bool) error {
)
for r.Next() && !isClosed(w.quit) {
rec := r.Record()
w.recordsReadMetric.WithLabelValues(dec.Type(rec).String()).Inc()

switch dec.Type(rec) {
case record.Series:
@@ -587,7 +587,7 @@ func (w *Watcher) readSegmentForGC(r *LiveReader, segmentNum int, _ bool) error
)
for r.Next() && !isClosed(w.quit) {
rec := r.Record()
w.recordsReadMetric.WithLabelValues(dec.Type(rec).String()).Inc()

switch dec.Type(rec) {
case record.Series:
@@ -616,21 +616,6 @@ func (w *Watcher) SetStartTime(t time.Time) {
w.startTimestamp = timestamp.FromTime(t)
}

type segmentReadFn func(w *Watcher, r *LiveReader, segmentNum int, tail bool) error

// Read all the series records from a Checkpoint directory.

@@ -13,11 +13,7 @@
package runtime

import "golang.org/x/sys/unix"

// Uname returns the uname of the host machine.
func Uname() string {
@@ -27,11 +23,11 @@ func Uname() string {
panic("unix.Uname failed: " + err.Error())
}

str := "(" + unix.ByteSliceToString(buf.Sysname[:])
str += " " + unix.ByteSliceToString(buf.Release[:])
str += " " + unix.ByteSliceToString(buf.Version[:])
str += " " + unix.ByteSliceToString(buf.Machine[:])
str += " " + unix.ByteSliceToString(buf.Nodename[:])
str += " " + unix.ByteSliceToString(buf.Domainname[:]) + ")"

return str
}
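
unix.ByteSliceToString returns everything up to the first NUL byte, which folds the previous bytes.IndexByte slicing into a single call. A small sketch, assuming a unix platform for the golang.org/x/sys/unix import:

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// A fixed-size buffer as returned in unix.Utsname fields,
	// padded with NUL bytes after the actual value.
	var sysname [65]byte
	copy(sysname[:], "Linux")
	fmt.Printf("%q\n", unix.ByteSliceToString(sysname[:])) // "Linux"
}
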

@@ -326,6 +326,9 @@ func (api *API) Register(r *route.Router) {
r.Get("/query_exemplars", wrapAgent(api.queryExemplars))
r.Post("/query_exemplars", wrapAgent(api.queryExemplars))
r.Get("/format_query", wrapAgent(api.formatQuery))
r.Post("/format_query", wrapAgent(api.formatQuery))
r.Get("/labels", wrapAgent(api.labelNames)) r.Get("/labels", wrapAgent(api.labelNames))
r.Post("/labels", wrapAgent(api.labelNames)) r.Post("/labels", wrapAgent(api.labelNames))
r.Get("/label/:name/values", wrapAgent(api.labelValues)) r.Get("/label/:name/values", wrapAgent(api.labelValues))
@ -431,6 +434,15 @@ func (api *API) query(r *http.Request) (result apiFuncResult) {
}, nil, res.Warnings, qry.Close} }, nil, res.Warnings, qry.Close}
} }
func (api *API) formatQuery(r *http.Request) (result apiFuncResult) {
expr, err := parser.ParseExpr(r.FormValue("query"))
if err != nil {
return invalidParamError(err, "query")
}
return apiFuncResult{expr.Pretty(0), nil, nil, nil}
}
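
The handler simply parses the expression and returns the parser's Pretty rendering, so clients can format PromQL without executing it. A hedged usage sketch against a local server (the URL and the exact response layout are assumptions):

package main

import (
	"fmt"
	"io"
	"net/http"
	"net/url"
)

func main() {
	// Assumes a Prometheus server listening on localhost:9090.
	resp, err := http.Get("http://localhost:9090/api/v1/format_query?" +
		url.Values{"query": {"sum(rate(http_requests_total[5m])) by (job)"}}.Encode())
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	// Expect something like {"status":"success","data":"sum by (job) (...)"}.
	fmt.Println(string(body))
}
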
func extractQueryOpts(r *http.Request) *promql.QueryOpts {
return &promql.QueryOpts{
EnablePerStepStats: r.FormValue("stats") == "all",

@@ -930,6 +930,20 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E
},
errType: errorBadData,
},
{
endpoint: api.formatQuery,
query: url.Values{
"query": []string{"foo+bar"},
},
response: "foo + bar",
},
{
endpoint: api.formatQuery,
query: url.Values{
"query": []string{"invalid_expression/"},
},
errType: errorBadData,
},
{
endpoint: api.series,
query: url.Values{

@@ -1,6 +1,6 @@
## Overview

The `ui` directory contains static files and templates used in the web UI. For
easier distribution they are compressed (c.f. Makefile) and statically compiled
into the Prometheus binary using the embed package.

During development it is more convenient to always use the files on disk to

@@ -1,6 +1,6 @@
{
"name": "@prometheus-io/codemirror-promql",
"version": "0.37.0",
"description": "a CodeMirror mode for the PromQL language",
"types": "dist/esm/index.d.ts",
"module": "dist/esm/index.js",
@@ -29,7 +29,7 @@
},
"homepage": "https://github.com/prometheus/prometheus/blob/main/web/ui/module/codemirror-promql/README.md",
"dependencies": {
"@prometheus-io/lezer-promql": "^0.37.0",
"lru-cache": "^6.0.0"
},
"devDependencies": {

@@ -36,6 +36,8 @@ export interface PrometheusClient {
// Note that the returned list can be a superset of those suggestions for the prefix (i.e., including ones without the
// prefix), as codemirror will filter these out when displaying suggestions to the user.
metricNames(prefix?: string): Promise<string[]>;
// flags returns flag values that prometheus was configured with.
flags(): Promise<Record<string, string>>;
}

export interface CacheConfig {
@@ -209,6 +211,15 @@ export class HTTPPrometheusClient implements PrometheusClient {
return this.labelValues('__name__');
}
flags(): Promise<Record<string, string>> {
return this.fetchAPI<Record<string, string>>(this.flagsEndpoint()).catch((error) => {
if (this.errorHandler) {
this.errorHandler(error);
}
return {};
});
}
private fetchAPI<T>(resource: string, init?: RequestInit): Promise<T> {
return this.fetchFn(this.url + resource, init)
.then((res) => {
@@ -254,6 +265,10 @@ export class HTTPPrometheusClient implements PrometheusClient {
private metricMetadataEndpoint(): string {
return `${this.apiPrefix}/metadata`;
}
private flagsEndpoint(): string {
return `${this.apiPrefix}/status/flags`;
}
}

class Cache {
@@ -263,6 +278,7 @@ class Cache {
private metricMetadata: Record<string, MetricMetadata[]>;
private labelValues: LRUCache<string, string[]>;
private labelNames: string[];
private flags: Record<string, string>;
constructor(config?: CacheConfig) {
const maxAge = config && config.maxAge ? config.maxAge : 5 * 60 * 1000;
@@ -270,6 +286,7 @@ class Cache {
this.metricMetadata = {};
this.labelValues = new LRUCache<string, string[]>(maxAge);
this.labelNames = [];
this.flags = {};
if (config?.initialMetricList) {
this.setLabelValues('__name__', config.initialMetricList);
}
@@ -297,6 +314,14 @@ class Cache {
});
}
setFlags(flags: Record<string, string>): void {
this.flags = flags;
}
getFlags(): Record<string, string> {
return this.flags;
}
setMetricMetadata(metadata: Record<string, MetricMetadata[]>): void {
this.metricMetadata = metadata;
}
@@ -388,7 +413,7 @@ export class CachedPrometheusClient implements PrometheusClient {
return this.client.metricMetadata().then((metadata) => {
this.cache.setMetricMetadata(metadata);
return metadata;
});
}
@@ -402,4 +427,16 @@ export class CachedPrometheusClient implements PrometheusClient {
metricNames(): Promise<string[]> {
return this.labelValues('__name__');
}
flags(): Promise<Record<string, string>> {
const cachedFlags = this.cache.getFlags();
if (cachedFlags && Object.keys(cachedFlags).length > 0) {
return Promise.resolve(cachedFlags);
}
return this.client.flags().then((flags) => {
this.cache.setFlags(flags);
return flags;
});
}
}
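
flags() wraps Prometheus's /api/v1/status/flags endpoint, and the cached client memoizes the result since flag values only change on restart. For comparison, a hedged Go sketch of the same request (server URL and the queried flag name are assumptions):

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

type flagsResponse struct {
	Status string            `json:"status"`
	Data   map[string]string `json:"data"`
}

func main() {
	// Assumes a Prometheus server listening on localhost:9090.
	resp, err := http.Get("http://localhost:9090/api/v1/status/flags")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	var fr flagsResponse
	if err := json.NewDecoder(resp.Body).Decode(&fr); err != nil {
		panic(err)
	}
	fmt.Println(fr.Data["storage.tsdb.retention.time"])
}
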

@@ -0,0 +1,27 @@
// Copyright 2022 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import { isPrometheusClient } from './index';
import { HTTPPrometheusClient } from '../client/prometheus';
describe('type of remoteConfig', () => {
it('should be a prometheusClient', () => {
const client = new HTTPPrometheusClient({});
expect(isPrometheusClient(client)).toBe(true);
});
it('should be a remote config', () => {
const remote = { url: 'https://prometheus.io' };
expect(isPrometheusClient(remote)).toBe(false);
});
});

@@ -31,16 +31,14 @@ export interface CompleteConfiguration {
completeStrategy?: CompleteStrategy;
}

export function isPrometheusClient(remoteConfig: PrometheusConfig | PrometheusClient): remoteConfig is PrometheusClient {
const client = remoteConfig as PrometheusClient;
return (
typeof client.labelNames === 'function' &&
typeof client.labelValues === 'function' &&
typeof client.metricMetadata === 'function' &&
typeof client.series === 'function' &&
typeof client.metricNames === 'function'
);
}
@@ -49,7 +47,7 @@ export function newCompleteStrategy(conf?: CompleteConfiguration): CompleteStrat
return conf.completeStrategy;
}
if (conf?.remote) {
if (isPrometheusClient(conf.remote)) {
return new HybridComplete(conf.remote, conf.maxMetricsMetadata);
}
return new HybridComplete(new CachedPrometheusClient(new HTTPPrometheusClient(conf.remote), conf.remote.cache), conf.maxMetricsMetadata);

@@ -1,6 +1,6 @@
{
"name": "@prometheus-io/lezer-promql",
"version": "0.37.0",
"description": "lezer-based PromQL grammar",
"main": "index.cjs",
"type": "module",

@@ -28,10 +28,10 @@
},
"module/codemirror-promql": {
"name": "@prometheus-io/codemirror-promql",
"version": "0.37.0",
"license": "Apache-2.0",
"dependencies": {
"@prometheus-io/lezer-promql": "^0.37.0",
"lru-cache": "^6.0.0"
},
"devDependencies": {
@@ -61,7 +61,7 @@
},
"module/lezer-promql": {
"name": "@prometheus-io/lezer-promql",
"version": "0.37.0",
"license": "Apache-2.0",
"devDependencies": {
"@lezer/generator": "^1.0.0",
@@ -17518,7 +17518,7 @@
},
"react-app": {
"name": "@prometheus-io/app",
"version": "0.37.0",
"dependencies": {
"@codemirror/autocomplete": "^6.0.0",
"@codemirror/commands": "^6.0.0",
@@ -17536,7 +17536,7 @@
"@lezer/lr": "^1.0.0",
"@nexucis/fuzzy": "^0.4.0",
"@nexucis/kvsearch": "^0.7.0",
"@prometheus-io/codemirror-promql": "^0.37.0",
"bootstrap": "^4.6.1",
"css.escape": "^1.5.1",
"downshift": "^6.1.7",
@@ -19783,7 +19783,7 @@
"@lezer/lr": "^1.0.0",
"@nexucis/fuzzy": "^0.4.0",
"@nexucis/kvsearch": "^0.7.0",
"@prometheus-io/codemirror-promql": "^0.37.0",
"@testing-library/react-hooks": "^7.0.1",
"@types/enzyme": "^3.10.10",
"@types/flot": "0.0.32",
@@ -19835,7 +19835,7 @@
"@lezer/common": "^1.0.0",
"@lezer/highlight": "^1.0.0",
"@lezer/lr": "^1.0.0",
"@prometheus-io/lezer-promql": "^0.37.0",
"@types/lru-cache": "^5.1.1",
"isomorphic-fetch": "^3.0.0",
"lru-cache": "^6.0.0",

@@ -1,6 +1,6 @@
{
"name": "@prometheus-io/app",
"version": "0.37.0",
"private": true,
"dependencies": {
"@codemirror/autocomplete": "^6.0.0",
@@ -19,7 +19,7 @@
"@lezer/common": "^1.0.0",
"@nexucis/fuzzy": "^0.4.0",
"@nexucis/kvsearch": "^0.7.0",
"@prometheus-io/codemirror-promql": "^0.37.0",
"bootstrap": "^4.6.1",
"css.escape": "^1.5.1",
"downshift": "^6.1.7",

@@ -19,8 +19,6 @@ export const baseTheme = EditorView.theme({
},
'.cm-matchingBracket': {
fontWeight: 'bold',
outline: '1px dashed transparent',
},
@@ -82,7 +80,6 @@ export const baseTheme = EditorView.theme({
'.cm-completionMatchedText': {
textDecoration: 'none',
fontWeight: 'bold',
},
'.cm-selectionMatch': {
@@ -105,12 +102,10 @@ export const baseTheme = EditorView.theme({
fontFamily: 'codicon',
paddingRight: '0',
opacity: '1',
},
'.cm-completionIcon-function, .cm-completionIcon-method': {
'&:after': { content: "'\\ea8c'" },
},
'.cm-completionIcon-class': {
'&:after': { content: "'○'" },
@@ -123,7 +118,6 @@ export const baseTheme = EditorView.theme({
},
'.cm-completionIcon-constant': {
'&:after': { content: "'\\eb5f'" },
},
'.cm-completionIcon-type': {
'&:after': { content: "'𝑡'" },
@@ -136,7 +130,6 @@ export const baseTheme = EditorView.theme({
},
'.cm-completionIcon-keyword': {
'&:after': { content: "'\\eb62'" },
},
'.cm-completionIcon-namespace': {
'&:after': { content: "'▢'" },
@@ -187,6 +180,31 @@ export const lightTheme = EditorView.theme(
backgroundColor: '#add6ff',
},
},
'.cm-matchingBracket': {
color: '#000',
backgroundColor: '#dedede',
},
'.cm-completionMatchedText': {
color: '#0066bf',
},
'.cm-completionIcon': {
color: '#007acc',
},
'.cm-completionIcon-constant': {
color: '#007acc',
},
'.cm-completionIcon-function, .cm-completionIcon-method': {
color: '#652d90',
},
'.cm-completionIcon-keyword': {
color: '#616161',
},
},
{ dark: false }
);
@@ -220,6 +238,26 @@ export const darkTheme = EditorView.theme(
backgroundColor: '#767676',
},
},
'.cm-matchingBracket, &.cm-focused .cm-matchingBracket': {
backgroundColor: '#616161',
},
'.cm-completionMatchedText': {
color: '#7dd3fc',
},
'.cm-completionIcon, .cm-completionIcon-constant': {
color: '#7dd3fc',
},
'.cm-completionIcon-function, .cm-completionIcon-method': {
color: '#d8b4fe',
},
'.cm-completionIcon-keyword': {
color: '#cbd5e1 !important',
},
},
{ dark: true }
);
@@ -239,3 +277,19 @@ export const promqlHighlighter = HighlightStyle.define([
{ tag: tags.invalid, color: 'red' },
{ tag: tags.comment, color: '#888', fontStyle: 'italic' },
]);
export const darkPromqlHighlighter = HighlightStyle.define([
{ tag: tags.name, color: '#000' },
{ tag: tags.number, color: '#22c55e' },
{ tag: tags.string, color: '#fca5a5' },
{ tag: tags.keyword, color: '#14bfad' },
{ tag: tags.function(tags.variableName), color: '#14bfad' },
{ tag: tags.labelName, color: '#ff8585' },
{ tag: tags.operator },
{ tag: tags.modifier, color: '#14bfad' },
{ tag: tags.paren },
{ tag: tags.squareBracket },
{ tag: tags.brace },
{ tag: tags.invalid, color: '#ff3d3d' },
{ tag: tags.comment, color: '#9ca3af', fontStyle: 'italic' },
]);

@@ -1,5 +1,5 @@
import React, { FC, useState, useEffect, useRef } from 'react';
import { Alert, Button, InputGroup, InputGroupAddon, InputGroupText } from 'reactstrap';
import { EditorView, highlightSpecialChars, keymap, ViewUpdate, placeholder } from '@codemirror/view';
import { EditorState, Prec, Compartment } from '@codemirror/state';
@@ -15,15 +15,16 @@ import {
closeBrackets,
closeBracketsKeymap,
} from '@codemirror/autocomplete';
import { baseTheme, lightTheme, darkTheme, promqlHighlighter, darkPromqlHighlighter } from './CMTheme';
import { FontAwesomeIcon } from '@fortawesome/react-fontawesome';
import { faSearch, faSpinner, faGlobeEurope, faIndent, faCheck } from '@fortawesome/free-solid-svg-icons';
import MetricsExplorer from './MetricsExplorer';
import { usePathPrefix } from '../../contexts/PathPrefixContext';
import { useTheme } from '../../contexts/ThemeContext';
import { CompleteStrategy, PromQLExtension } from '@prometheus-io/codemirror-promql';
import { newCompleteStrategy } from '@prometheus-io/codemirror-promql/dist/esm/complete';
import { API_PATH } from '../../constants/constants';
const promqlExtension = new PromQLExtension();
@@ -98,6 +99,10 @@ const ExpressionInput: FC<CMExpressionInputProps> = ({
const pathPrefix = usePathPrefix();
const { theme } = useTheme();
const [formatError, setFormatError] = useState<string | null>(null);
const [isFormatting, setIsFormatting] = useState<boolean>(false);
const [exprFormatted, setExprFormatted] = useState<boolean>(false);
// (Re)initialize editor based on settings / setting changes.
useEffect(() => {
// Build the dynamic part of the config.
@@ -112,8 +117,14 @@ const ExpressionInput: FC<CMExpressionInputProps> = ({
queryHistory
),
});
let highlighter = syntaxHighlighting(theme === 'dark' ? darkPromqlHighlighter : promqlHighlighter);
const dynamicConfig = [
enableHighlighting ? highlighter : [],
promqlExtension.asExtension(),
theme === 'dark' ? darkTheme : lightTheme,
];
@@ -169,7 +180,10 @@ const ExpressionInput: FC<CMExpressionInputProps> = ({
])
),
EditorView.updateListener.of((update: ViewUpdate): void => {
if (update.docChanged) {
onExpressionChange(update.state.doc.toString());
setExprFormatted(false);
}
}),
],
});
@@ -209,6 +223,47 @@ const ExpressionInput: FC<CMExpressionInputProps> = ({
);
};
const formatExpression = () => {
setFormatError(null);
setIsFormatting(true);
fetch(
`${pathPrefix}/${API_PATH}/format_query?${new URLSearchParams({
query: value,
})}`,
{
cache: 'no-store',
credentials: 'same-origin',
}
)
.then((resp) => {
if (!resp.ok && resp.status !== 400) {
throw new Error(`format HTTP request failed: ${resp.statusText}`);
}
return resp.json();
})
.then((json) => {
if (json.status !== 'success') {
throw new Error(json.error || 'invalid response JSON');
}
const view = viewRef.current;
if (view === null) {
return;
}
view.dispatch(view.state.update({ changes: { from: 0, to: view.state.doc.length, insert: json.data } }));
setExprFormatted(true);
})
.catch((err) => {
setFormatError(err.message);
})
.finally(() => {
setIsFormatting(false);
});
};
return (
<>
<InputGroup className="expression-input">
@@ -220,7 +275,21 @@ const ExpressionInput: FC<CMExpressionInputProps> = ({
<div ref={containerRef} className="cm-expression-input" />
<InputGroupAddon addonType="append">
<Button
className="expression-input-action-btn"
title={isFormatting ? 'Formatting expression' : exprFormatted ? 'Expression formatted' : 'Format expression'}
onClick={formatExpression}
disabled={isFormatting || exprFormatted}
>
{isFormatting ? (
<FontAwesomeIcon icon={faSpinner} spin />
) : exprFormatted ? (
<FontAwesomeIcon icon={faCheck} />
) : (
<FontAwesomeIcon icon={faIndent} />
)}
</Button>
<Button
className="expression-input-action-btn"
title="Open metrics explorer" title="Open metrics explorer"
onClick={() => setShowMetricsExplorer(true)} onClick={() => setShowMetricsExplorer(true)}
> >
@ -232,6 +301,8 @@ const ExpressionInput: FC<CMExpressionInputProps> = ({
</InputGroupAddon> </InputGroupAddon>
</InputGroup> </InputGroup>
{formatError && <Alert color="danger">Error formatting expression: {formatError}</Alert>}
<MetricsExplorer
show={showMetricsExplorer}
updateShow={setShowMetricsExplorer}

@@ -58,7 +58,7 @@
.metrics-explorer .metric:hover {
background: $metrics-explorer-bg;
}

button.expression-input-action-btn {
color: $input-group-addon-color;
background-color: $input-group-addon-bg;
border: $input-border-width solid $input-group-addon-border-color;
@@ -114,7 +114,7 @@ button.execute-btn {
}

input[type='checkbox']:checked + label {
color: $checked-checkbox-color;
}

.custom-control-label {

@@ -11,12 +11,14 @@ $config-yaml-color: $black;
$config-yaml-bg: $gray-500;
$config-yaml-border: $gray-700;

$query-stats-color: lighten($secondary, 20%);
$metrics-explorer-bg: $dropdown-link-hover-bg;

$clear-time-btn-bg: $secondary;
$checked-checkbox-color: #60a5fa;
.bootstrap-dark {
@import './shared';
}

@@ -16,6 +16,8 @@ $metrics-explorer-bg: #efefef;
$clear-time-btn-bg: $white;
$checked-checkbox-color: #286090;
.bootstrap {
@import './shared';
}