From e196b977afdfd3cc72ac15de97845bec056a8a3d Mon Sep 17 00:00:00 2001 From: Oleg Zaytsev Date: Wed, 25 Sep 2024 10:38:47 +0200 Subject: [PATCH 01/74] Process MemPostings.Delete() with GOMAXPROCS workers We are still seeing lock contention on MemPostings.mtx, and MemPostings.Delete() is by far the most expensive operation on that mutex. This adds parallelism to that method, trying to reduce the amount of time we spend with the mutex held. Signed-off-by: Oleg Zaytsev --- tsdb/index/postings.go | 43 ++++++++++++++++++++++++++++++++----- tsdb/index/postings_test.go | 2 +- 2 files changed, 39 insertions(+), 6 deletions(-) diff --git a/tsdb/index/postings.go b/tsdb/index/postings.go index bfe74c323..25780e4ad 100644 --- a/tsdb/index/postings.go +++ b/tsdb/index/postings.go @@ -26,6 +26,7 @@ import ( "sync" "github.com/bboreham/go-loser" + "github.com/cespare/xxhash/v2" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/storage" @@ -293,6 +294,9 @@ func (p *MemPostings) Delete(deleted map[storage.SeriesRef]struct{}, affected ma p.mtx.Lock() defer p.mtx.Unlock() + // Deleting label names mutates p.m map, so it should be done from a single goroutine after nobody else is reading it. + deleteLabelNames := make(chan string, len(p.m)) + process := func(l labels.Label) { orig := p.m[l.Name][l.Value] repl := make([]storage.SeriesRef, 0, len(orig)) @@ -305,17 +309,46 @@ func (p *MemPostings) Delete(deleted map[storage.SeriesRef]struct{}, affected ma p.m[l.Name][l.Value] = repl } else { delete(p.m[l.Name], l.Value) - // Delete the key if we removed all values. if len(p.m[l.Name]) == 0 { - delete(p.m, l.Name) + // Delete the key if we removed all values. + deleteLabelNames <- l.Name } } } - for l := range affected { - process(l) + // Create GOMAXPROCS workers. 
+ wg := sync.WaitGroup{} + jobs := make([]chan labels.Label, runtime.GOMAXPROCS(0)) + for i := range jobs { + jobs[i] = make(chan labels.Label, 128) + wg.Add(1) + go func(jobs chan labels.Label) { + defer wg.Done() + for l := range jobs { + process(l) + } + }(jobs[i]) + } + + // Process all affected labels and the allPostingsKey. + for l := range affected { + j := int(xxhash.Sum64String(l.Name) % uint64(len(jobs))) + jobs[j] <- l + } + j := int(xxhash.Sum64String(allPostingsKey.Name) % uint64(len(jobs))) + jobs[j] <- allPostingsKey + + // Close jobs channels and wait all workers to finish. + for i := range jobs { + close(jobs[i]) + } + wg.Wait() + + // Close deleteLabelNames channel and delete the label names requested. + close(deleteLabelNames) + for name := range deleteLabelNames { + delete(p.m, name) } - process(allPostingsKey) } // Iter calls f for each postings list. It aborts if f returns an error and returns it. diff --git a/tsdb/index/postings_test.go b/tsdb/index/postings_test.go index 96c9ed124..1802c9e89 100644 --- a/tsdb/index/postings_test.go +++ b/tsdb/index/postings_test.go @@ -1025,7 +1025,7 @@ func BenchmarkMemPostings_Delete(b *testing.B) { return s } - const total = 1e6 + const total = 2e6 allSeries := [total]labels.Labels{} nameValues := make([]string, 0, 100) for i := 0; i < total; i++ { From 9c417aa71045e36d9fad66e1e77c1d942cbacc17 Mon Sep 17 00:00:00 2001 From: Oleg Zaytsev Date: Wed, 25 Sep 2024 14:08:50 +0200 Subject: [PATCH 02/74] Fix deadlock with empty MemPostings Signed-off-by: Oleg Zaytsev --- tsdb/index/postings.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tsdb/index/postings.go b/tsdb/index/postings.go index 25780e4ad..3164d8c2f 100644 --- a/tsdb/index/postings.go +++ b/tsdb/index/postings.go @@ -295,7 +295,8 @@ func (p *MemPostings) Delete(deleted map[storage.SeriesRef]struct{}, affected ma defer p.mtx.Unlock() // Deleting label names mutates p.m map, so it should be done from a single goroutine after 
nobody else is reading it. - deleteLabelNames := make(chan string, len(p.m)) + // Adding +1 to length to account for allPostingsKey processing when MemPostings is empty. + deleteLabelNames := make(chan string, len(p.m)+1) process := func(l labels.Label) { orig := p.m[l.Name][l.Value] From ccd0308abcb98505797161b9142da1fe9ddbe88c Mon Sep 17 00:00:00 2001 From: Oleg Zaytsev Date: Wed, 25 Sep 2024 14:59:16 +0200 Subject: [PATCH 03/74] Don't do anything if MemPostings are empty Signed-off-by: Oleg Zaytsev --- tsdb/index/postings.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/tsdb/index/postings.go b/tsdb/index/postings.go index 3164d8c2f..e6a6c708f 100644 --- a/tsdb/index/postings.go +++ b/tsdb/index/postings.go @@ -293,10 +293,12 @@ func (p *MemPostings) EnsureOrder(numberOfConcurrentProcesses int) { func (p *MemPostings) Delete(deleted map[storage.SeriesRef]struct{}, affected map[labels.Label]struct{}) { p.mtx.Lock() defer p.mtx.Unlock() + if len(p.m) == 0 || len(deleted) == 0 { + return + } // Deleting label names mutates p.m map, so it should be done from a single goroutine after nobody else is reading it. - // Adding +1 to length to account for allPostingsKey processing when MemPostings is empty. 
- deleteLabelNames := make(chan string, len(p.m)+1) + deleteLabelNames := make(chan string, len(p.m)) process := func(l labels.Label) { orig := p.m[l.Name][l.Value] From be0f10054e6310a993e9e73dd97607071749ac76 Mon Sep 17 00:00:00 2001 From: Alex Johnson Date: Sat, 14 Sep 2024 13:04:33 -0500 Subject: [PATCH 04/74] Remove no-default-scrape-port featureFlag Signed-off-by: Alex Johnson --- cmd/prometheus/main.go | 5 +--- cmd/promtool/main.go | 11 +++---- cmd/promtool/sd.go | 8 +++--- cmd/promtool/sd_test.go | 2 +- docs/command-line/prometheus.md | 2 +- docs/command-line/promtool.md | 2 +- docs/feature_flags.md | 9 ------ scrape/manager.go | 3 +- scrape/manager_test.go | 30 +++++++++---------- scrape/scrape.go | 5 +--- scrape/target.go | 51 ++------------------------------- scrape/target_test.go | 4 +-- 12 files changed, 32 insertions(+), 100 deletions(-) diff --git a/cmd/prometheus/main.go b/cmd/prometheus/main.go index e7fd82e6f..1be8b7694 100644 --- a/cmd/prometheus/main.go +++ b/cmd/prometheus/main.go @@ -217,9 +217,6 @@ func (c *flagConfig) setFeatureListOptions(logger log.Logger) error { case "concurrent-rule-eval": c.enableConcurrentRuleEval = true level.Info(logger).Log("msg", "Experimental concurrent rule evaluation enabled.") - case "no-default-scrape-port": - c.scrape.NoDefaultPort = true - level.Info(logger).Log("msg", "No default port will be appended to scrape targets' addresses.") case "promql-experimental-functions": parser.EnableExperimentalFunctions = true level.Info(logger).Log("msg", "Experimental PromQL functions enabled.") @@ -474,7 +471,7 @@ func main() { a.Flag("scrape.discovery-reload-interval", "Interval used by scrape manager to throttle target groups updates."). Hidden().Default("5s").SetValue(&cfg.scrape.DiscoveryReloadInterval) - a.Flag("enable-feature", "Comma separated feature names to enable. 
Valid options: auto-gomemlimit, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, extra-scrape-metrics, auto-gomaxprocs, no-default-scrape-port, native-histograms, otlp-write-receiver, created-timestamp-zero-ingestion, concurrent-rule-eval, delayed-compaction, old-ui. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details."). + a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: auto-gomemlimit, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, extra-scrape-metrics, auto-gomaxprocs, native-histograms, otlp-write-receiver, created-timestamp-zero-ingestion, concurrent-rule-eval, delayed-compaction, old-ui. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details."). Default("").StringsVar(&cfg.featureList) a.Flag("agent", "Run Prometheus in 'Agent mode'.").BoolVar(&agentMode) diff --git a/cmd/promtool/main.go b/cmd/promtool/main.go index 4d4cf6c5d..48f9be930 100644 --- a/cmd/promtool/main.go +++ b/cmd/promtool/main.go @@ -291,7 +291,7 @@ func main() { promQLLabelsDeleteQuery := promQLLabelsDeleteCmd.Arg("query", "PromQL query.").Required().String() promQLLabelsDeleteName := promQLLabelsDeleteCmd.Arg("name", "Name of the label to delete.").Required().String() - featureList := app.Flag("enable-feature", "Comma separated feature names to enable (only PromQL related and no-default-scrape-port). See https://prometheus.io/docs/prometheus/latest/feature_flags/ for the options and more details.").Default("").Strings() + featureList := app.Flag("enable-feature", "Comma separated feature names to enable. Currently unused.").Default("").Strings() documentationCmd := app.Command("write-documentation", "Generate command line documentation. 
Internal use.").Hidden() @@ -321,24 +321,21 @@ func main() { } } - var noDefaultScrapePort bool for _, f := range *featureList { opts := strings.Split(f, ",") for _, o := range opts { switch o { - case "no-default-scrape-port": - noDefaultScrapePort = true case "": continue default: - fmt.Printf(" WARNING: Unknown option for --enable-feature: %q\n", o) + fmt.Printf(" WARNING: --enable-feature is currently a no-op") } } } switch parsedCmd { case sdCheckCmd.FullCommand(): - os.Exit(CheckSD(*sdConfigFile, *sdJobName, *sdTimeout, noDefaultScrapePort, prometheus.DefaultRegisterer)) + os.Exit(CheckSD(*sdConfigFile, *sdJobName, *sdTimeout, prometheus.DefaultRegisterer)) case checkConfigCmd.FullCommand(): os.Exit(CheckConfig(*agentMode, *checkConfigSyntaxOnly, newLintConfig(*checkConfigLint, *checkConfigLintFatal), *configFiles...)) @@ -1219,7 +1216,7 @@ func checkTargetGroupsForScrapeConfig(targetGroups []*targetgroup.Group, scfg *c lb := labels.NewBuilder(labels.EmptyLabels()) for _, tg := range targetGroups { var failures []error - targets, failures = scrape.TargetsFromGroup(tg, scfg, false, targets, lb) + targets, failures = scrape.TargetsFromGroup(tg, scfg, targets, lb) if len(failures) > 0 { first := failures[0] return first diff --git a/cmd/promtool/sd.go b/cmd/promtool/sd.go index e65262d43..6c0e896ff 100644 --- a/cmd/promtool/sd.go +++ b/cmd/promtool/sd.go @@ -38,7 +38,7 @@ type sdCheckResult struct { } // CheckSD performs service discovery for the given job name and reports the results. 
-func CheckSD(sdConfigFiles, sdJobName string, sdTimeout time.Duration, noDefaultScrapePort bool, registerer prometheus.Registerer) int { +func CheckSD(sdConfigFiles, sdJobName string, sdTimeout time.Duration, registerer prometheus.Registerer) int { logger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr)) cfg, err := config.LoadFile(sdConfigFiles, false, false, logger) @@ -114,7 +114,7 @@ outerLoop: } results := []sdCheckResult{} for _, tgs := range sdCheckResults { - results = append(results, getSDCheckResult(tgs, scrapeConfig, noDefaultScrapePort)...) + results = append(results, getSDCheckResult(tgs, scrapeConfig)...) } res, err := json.MarshalIndent(results, "", " ") @@ -127,7 +127,7 @@ outerLoop: return successExitCode } -func getSDCheckResult(targetGroups []*targetgroup.Group, scrapeConfig *config.ScrapeConfig, noDefaultScrapePort bool) []sdCheckResult { +func getSDCheckResult(targetGroups []*targetgroup.Group, scrapeConfig *config.ScrapeConfig) []sdCheckResult { sdCheckResults := []sdCheckResult{} lb := labels.NewBuilder(labels.EmptyLabels()) for _, targetGroup := range targetGroups { @@ -144,7 +144,7 @@ func getSDCheckResult(targetGroups []*targetgroup.Group, scrapeConfig *config.Sc } } - res, orig, err := scrape.PopulateLabels(lb, scrapeConfig, noDefaultScrapePort) + res, orig, err := scrape.PopulateLabels(lb, scrapeConfig) result := sdCheckResult{ DiscoveredLabels: orig, Labels: res, diff --git a/cmd/promtool/sd_test.go b/cmd/promtool/sd_test.go index cb65ee72a..44d808465 100644 --- a/cmd/promtool/sd_test.go +++ b/cmd/promtool/sd_test.go @@ -70,5 +70,5 @@ func TestSDCheckResult(t *testing.T) { }, } - testutil.RequireEqual(t, expectedSDCheckResult, getSDCheckResult(targetGroups, scrapeConfig, true)) + testutil.RequireEqual(t, expectedSDCheckResult, getSDCheckResult(targetGroups, scrapeConfig)) } diff --git a/docs/command-line/prometheus.md b/docs/command-line/prometheus.md index 32b7039bd..7737b5021 100644 --- a/docs/command-line/prometheus.md +++ 
b/docs/command-line/prometheus.md @@ -56,7 +56,7 @@ The Prometheus monitoring server | --query.timeout | Maximum time a query may take before being aborted. Use with server mode only. | `2m` | | --query.max-concurrency | Maximum number of queries executed concurrently. Use with server mode only. | `20` | | --query.max-samples | Maximum number of samples a single query can load into memory. Note that queries will fail if they try to load more samples than this into memory, so this also limits the number of samples a query can return. Use with server mode only. | `50000000` | -| --enable-feature ... | Comma separated feature names to enable. Valid options: auto-gomemlimit, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, extra-scrape-metrics, auto-gomaxprocs, no-default-scrape-port, native-histograms, otlp-write-receiver, created-timestamp-zero-ingestion, concurrent-rule-eval, delayed-compaction, old-ui. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details. | | +| --enable-feature ... | Comma separated feature names to enable. Valid options: auto-gomemlimit, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, extra-scrape-metrics, auto-gomaxprocs, native-histograms, otlp-write-receiver, created-timestamp-zero-ingestion, concurrent-rule-eval, delayed-compaction, old-ui. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details. | | | --agent | Run Prometheus in 'Agent mode'. | | | --log.level | Only log messages with the given severity or above. One of: [debug, info, warn, error] | `info` | | --log.format | Output format of log messages. 
One of: [logfmt, json] | `logfmt` | diff --git a/docs/command-line/promtool.md b/docs/command-line/promtool.md index e48cede79..996a99655 100644 --- a/docs/command-line/promtool.md +++ b/docs/command-line/promtool.md @@ -15,7 +15,7 @@ Tooling for the Prometheus monitoring system. | -h, --help | Show context-sensitive help (also try --help-long and --help-man). | | --version | Show application version. | | --experimental | Enable experimental commands. | -| --enable-feature ... | Comma separated feature names to enable (only PromQL related and no-default-scrape-port). See https://prometheus.io/docs/prometheus/latest/feature_flags/ for the options and more details. | +| --enable-feature ... | Comma separated feature names to enable. Currently unused. | diff --git a/docs/feature_flags.md b/docs/feature_flags.md index 911dde20e..1e9455a3f 100644 --- a/docs/feature_flags.md +++ b/docs/feature_flags.md @@ -71,15 +71,6 @@ When enabled, the GOMEMLIMIT variable is automatically set to match the Linux co There is also an additional tuning flag, `--auto-gomemlimit.ratio`, which allows controlling how much of the memory is used for Prometheus. The remainder is reserved for memory outside the process. For example, kernel page cache. Page cache is important for Prometheus TSDB query performance. The default is `0.9`, which means 90% of the memory limit will be used for Prometheus. -## No default scrape port - -`--enable-feature=no-default-scrape-port` - -When enabled, the default ports for HTTP (`:80`) or HTTPS (`:443`) will _not_ be added to -the address used to scrape a target (the value of the `__address_` label), contrary to the default behavior. -In addition, if a default HTTP or HTTPS port has already been added either in a static configuration or -by a service discovery mechanism and the respective scheme is specified (`http` or `https`), that port will be removed. 
- ## Native Histograms `--enable-feature=native-histograms` diff --git a/scrape/manager.go b/scrape/manager.go index d7786a082..cbb881028 100644 --- a/scrape/manager.go +++ b/scrape/manager.go @@ -70,8 +70,7 @@ func NewManager(o *Options, logger log.Logger, newScrapeFailureLogger func(strin // Options are the configuration parameters to the scrape manager. type Options struct { - ExtraMetrics bool - NoDefaultPort bool + ExtraMetrics bool // Option used by downstream scraper users like OpenTelemetry Collector // to help lookup metric metadata. Should be false for Prometheus. PassMetadataInContext bool diff --git a/scrape/manager_test.go b/scrape/manager_test.go index cd712ca62..81ce8bd84 100644 --- a/scrape/manager_test.go +++ b/scrape/manager_test.go @@ -52,12 +52,11 @@ func init() { func TestPopulateLabels(t *testing.T) { cases := []struct { - in labels.Labels - cfg *config.ScrapeConfig - noDefaultPort bool - res labels.Labels - resOrig labels.Labels - err string + in labels.Labels + cfg *config.ScrapeConfig + res labels.Labels + resOrig labels.Labels + err string }{ // Regular population of scrape config options. 
{ @@ -111,8 +110,8 @@ func TestPopulateLabels(t *testing.T) { ScrapeTimeout: model.Duration(time.Second), }, res: labels.FromMap(map[string]string{ - model.AddressLabel: "1.2.3.4:80", - model.InstanceLabel: "1.2.3.4:80", + model.AddressLabel: "1.2.3.4", + model.InstanceLabel: "1.2.3.4", model.SchemeLabel: "http", model.MetricsPathLabel: "/custom", model.JobLabel: "custom-job", @@ -142,7 +141,7 @@ func TestPopulateLabels(t *testing.T) { ScrapeTimeout: model.Duration(time.Second), }, res: labels.FromMap(map[string]string{ - model.AddressLabel: "[::1]:443", + model.AddressLabel: "[::1]", model.InstanceLabel: "custom-instance", model.SchemeLabel: "https", model.MetricsPathLabel: "/metrics", @@ -365,7 +364,6 @@ func TestPopulateLabels(t *testing.T) { ScrapeInterval: model.Duration(time.Second), ScrapeTimeout: model.Duration(time.Second), }, - noDefaultPort: true, res: labels.FromMap(map[string]string{ model.AddressLabel: "1.2.3.4", model.InstanceLabel: "1.2.3.4", @@ -384,7 +382,7 @@ func TestPopulateLabels(t *testing.T) { model.ScrapeTimeoutLabel: "1s", }), }, - // Remove default port (http). + // verify that the default port is not removed (http). { in: labels.FromMap(map[string]string{ model.AddressLabel: "1.2.3.4:80", @@ -396,9 +394,8 @@ func TestPopulateLabels(t *testing.T) { ScrapeInterval: model.Duration(time.Second), ScrapeTimeout: model.Duration(time.Second), }, - noDefaultPort: true, res: labels.FromMap(map[string]string{ - model.AddressLabel: "1.2.3.4", + model.AddressLabel: "1.2.3.4:80", model.InstanceLabel: "1.2.3.4:80", model.SchemeLabel: "http", model.MetricsPathLabel: "/metrics", @@ -415,7 +412,7 @@ func TestPopulateLabels(t *testing.T) { model.ScrapeTimeoutLabel: "1s", }), }, - // Remove default port (https). + // verify that the default port is not removed (https). 
{ in: labels.FromMap(map[string]string{ model.AddressLabel: "1.2.3.4:443", @@ -427,9 +424,8 @@ func TestPopulateLabels(t *testing.T) { ScrapeInterval: model.Duration(time.Second), ScrapeTimeout: model.Duration(time.Second), }, - noDefaultPort: true, res: labels.FromMap(map[string]string{ - model.AddressLabel: "1.2.3.4", + model.AddressLabel: "1.2.3.4:443", model.InstanceLabel: "1.2.3.4:443", model.SchemeLabel: "https", model.MetricsPathLabel: "/metrics", @@ -450,7 +446,7 @@ func TestPopulateLabels(t *testing.T) { for _, c := range cases { in := c.in.Copy() - res, orig, err := PopulateLabels(labels.NewBuilder(c.in), c.cfg, c.noDefaultPort) + res, orig, err := PopulateLabels(labels.NewBuilder(c.in), c.cfg) if c.err != "" { require.EqualError(t, err, c.err) } else { diff --git a/scrape/scrape.go b/scrape/scrape.go index dca4682b1..e88eb15a9 100644 --- a/scrape/scrape.go +++ b/scrape/scrape.go @@ -87,8 +87,6 @@ type scrapePool struct { // Constructor for new scrape loops. This is settable for testing convenience. newLoop func(scrapeLoopOptions) loop - noDefaultPort bool - metrics *scrapeMetrics scrapeFailureLogger log.Logger @@ -149,7 +147,6 @@ func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, offsetSeed logger: logger, metrics: metrics, httpOpts: options.HTTPClientOptions, - noDefaultPort: options.NoDefaultPort, } sp.newLoop = func(opts scrapeLoopOptions) loop { // Update the targets retrieval function for metadata to a new scrape cache. 
@@ -429,7 +426,7 @@ func (sp *scrapePool) Sync(tgs []*targetgroup.Group) { sp.droppedTargets = []*Target{} sp.droppedTargetsCount = 0 for _, tg := range tgs { - targets, failures := TargetsFromGroup(tg, sp.config, sp.noDefaultPort, targets, lb) + targets, failures := TargetsFromGroup(tg, sp.config, targets, lb) for _, err := range failures { level.Error(sp.logger).Log("msg", "Creating target failed", "err", err) } diff --git a/scrape/target.go b/scrape/target.go index 375439833..06d4737ff 100644 --- a/scrape/target.go +++ b/scrape/target.go @@ -17,7 +17,6 @@ import ( "errors" "fmt" "hash/fnv" - "net" "net/url" "strings" "sync" @@ -424,7 +423,7 @@ func (app *maxSchemaAppender) AppendHistogram(ref storage.SeriesRef, lset labels // PopulateLabels builds a label set from the given label set and scrape configuration. // It returns a label set before relabeling was applied as the second return value. // Returns the original discovered label set found before relabelling was applied if the target is dropped during relabeling. -func PopulateLabels(lb *labels.Builder, cfg *config.ScrapeConfig, noDefaultPort bool) (res, orig labels.Labels, err error) { +func PopulateLabels(lb *labels.Builder, cfg *config.ScrapeConfig) (res, orig labels.Labels, err error) { // Copy labels into the labelset for the target if they are not set already. scrapeLabels := []labels.Label{ {Name: model.JobLabel, Value: cfg.JobName}, @@ -457,51 +456,7 @@ func PopulateLabels(lb *labels.Builder, cfg *config.ScrapeConfig, noDefaultPort return labels.EmptyLabels(), labels.EmptyLabels(), errors.New("no address") } - // addPort checks whether we should add a default port to the address. - // If the address is not valid, we don't append a port either. - addPort := func(s string) (string, string, bool) { - // If we can split, a port exists and we don't have to add one. 
- if host, port, err := net.SplitHostPort(s); err == nil { - return host, port, false - } - // If adding a port makes it valid, the previous error - // was not due to an invalid address and we can append a port. - _, _, err := net.SplitHostPort(s + ":1234") - return "", "", err == nil - } - addr := lb.Get(model.AddressLabel) - scheme := lb.Get(model.SchemeLabel) - host, port, add := addPort(addr) - // If it's an address with no trailing port, infer it based on the used scheme - // unless the no-default-scrape-port feature flag is present. - if !noDefaultPort && add { - // Addresses reaching this point are already wrapped in [] if necessary. - switch scheme { - case "http", "": - addr += ":80" - case "https": - addr += ":443" - default: - return labels.EmptyLabels(), labels.EmptyLabels(), fmt.Errorf("invalid scheme: %q", cfg.Scheme) - } - lb.Set(model.AddressLabel, addr) - } - - if noDefaultPort { - // If it's an address with a trailing default port and the - // no-default-scrape-port flag is present, remove the port. - switch port { - case "80": - if scheme == "http" { - lb.Set(model.AddressLabel, host) - } - case "443": - if scheme == "https" { - lb.Set(model.AddressLabel, host) - } - } - } if err := config.CheckTargetAddress(model.LabelValue(addr)); err != nil { return labels.EmptyLabels(), labels.EmptyLabels(), err @@ -557,7 +512,7 @@ func PopulateLabels(lb *labels.Builder, cfg *config.ScrapeConfig, noDefaultPort } // TargetsFromGroup builds targets based on the given TargetGroup and config. 
-func TargetsFromGroup(tg *targetgroup.Group, cfg *config.ScrapeConfig, noDefaultPort bool, targets []*Target, lb *labels.Builder) ([]*Target, []error) { +func TargetsFromGroup(tg *targetgroup.Group, cfg *config.ScrapeConfig, targets []*Target, lb *labels.Builder) ([]*Target, []error) { targets = targets[:0] failures := []error{} @@ -573,7 +528,7 @@ func TargetsFromGroup(tg *targetgroup.Group, cfg *config.ScrapeConfig, noDefault } } - lset, origLabels, err := PopulateLabels(lb, cfg, noDefaultPort) + lset, origLabels, err := PopulateLabels(lb, cfg) if err != nil { failures = append(failures, fmt.Errorf("instance %d in group %s: %w", i, tg, err)) } diff --git a/scrape/target_test.go b/scrape/target_test.go index 84fe078b2..bd2795287 100644 --- a/scrape/target_test.go +++ b/scrape/target_test.go @@ -348,7 +348,7 @@ func TestTargetsFromGroup(t *testing.T) { ScrapeInterval: model.Duration(1 * time.Minute), } lb := labels.NewBuilder(labels.EmptyLabels()) - targets, failures := TargetsFromGroup(&targetgroup.Group{Targets: []model.LabelSet{{}, {model.AddressLabel: "localhost:9090"}}}, &cfg, false, nil, lb) + targets, failures := TargetsFromGroup(&targetgroup.Group{Targets: []model.LabelSet{{}, {model.AddressLabel: "localhost:9090"}}}, &cfg, nil, lb) require.Len(t, targets, 1) require.Len(t, failures, 1) require.EqualError(t, failures[0], expectedError) @@ -435,7 +435,7 @@ scrape_configs: lb := labels.NewBuilder(labels.EmptyLabels()) group := &targetgroup.Group{Targets: targets} for i := 0; i < b.N; i++ { - tgets, _ = TargetsFromGroup(group, config.ScrapeConfigs[0], false, tgets, lb) + tgets, _ = TargetsFromGroup(group, config.ScrapeConfigs[0], tgets, lb) if len(targets) != nTargets { b.Fatalf("Expected %d targets, got %d", nTargets, len(targets)) } From fcbd18dabb39f4f8a05e4d486ea1c7c6538397c4 Mon Sep 17 00:00:00 2001 From: Julius Volz Date: Wed, 25 Sep 2024 18:27:27 +0200 Subject: [PATCH 05/74] Remove Query page alert close buttons that don't do anything Signed-off-by: 
Julius Volz --- web/ui/mantine-ui/src/pages/query/QueryPage.tsx | 2 -- 1 file changed, 2 deletions(-) diff --git a/web/ui/mantine-ui/src/pages/query/QueryPage.tsx b/web/ui/mantine-ui/src/pages/query/QueryPage.tsx index 3baa77dfd..71c969daf 100644 --- a/web/ui/mantine-ui/src/pages/query/QueryPage.tsx +++ b/web/ui/mantine-ui/src/pages/query/QueryPage.tsx @@ -84,7 +84,6 @@ export default function QueryPage() { icon={} color="red" title="Error fetching metrics list" - withCloseButton > Unable to fetch list of metric names: {metricNamesError.message} @@ -95,7 +94,6 @@ export default function QueryPage() { icon={} color="red" title="Error fetching server time" - withCloseButton > {timeError.message} From 15b68e989cd347c03c36b8aeabae99f550a55361 Mon Sep 17 00:00:00 2001 From: Craig Ringer Date: Wed, 18 Oct 2023 14:11:51 +1300 Subject: [PATCH 06/74] Refer to staleness in instant vector documentation The instant vector documentation does not explain which metric samples are selected - in particular, it makes no reference to staleness. It's confusing when reading the docs to understand how exactly Prometheus selects the metrics to report: the most recent sample older than the search timestamp specified in the API request, so long as that metric is not "stale". Signed-off-by: Craig Ringer --- docs/querying/basics.md | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/docs/querying/basics.md b/docs/querying/basics.md index 4ea186bee..66d7b8018 100644 --- a/docs/querying/basics.md +++ b/docs/querying/basics.md @@ -109,8 +109,16 @@ single sample value for each at a given timestamp (point in time). In the simpl form, only a metric name is specified, which results in an instant vector containing elements for all time series that have this metric name. 
+The value returned will be that of the most recent sample at or before the +query's evaluation timestamp (in the case of an +[instant query](api.md#instant-queries)) +or the current step within the query (in the case of a +[range query](api.md#range-queries)). +The [`@` modifier](#modifier) allows overriding the timestamp relative to which +the selection takes place. Time series are only returned if their most recent sample is less than the [lookback period](#staleness) ago. + This example selects all time series that have the `http_requests_total` metric -name: +name, returning the most recent sample for each: http_requests_total @@ -359,7 +367,8 @@ cases like aggregation (`sum`, `avg`, and so on), where multiple aggregated time series do not precisely align in time. Because of their independence, Prometheus needs to assign a value at those timestamps for each relevant time series. It does so by taking the newest sample that is less than the lookback period ago. -The lookback period is 5 minutes by default. +The lookback period is 5 minutes by default, but can be +[set with the `--query.lookback-delta` flag](../command-line/prometheus.md). If a target scrape or rule evaluation no longer returns a sample for a time series that was previously present, this time series will be marked as stale.
From 4fd2556baa8bc11d49529abb92163feca33d1a58 Mon Sep 17 00:00:00 2001 From: Oleg Zaytsev Date: Thu, 26 Sep 2024 15:43:19 +0200 Subject: [PATCH 07/74] Extract processWithBoundedParallelismAndConsistentWorkers Signed-off-by: Oleg Zaytsev --- tsdb/index/postings.go | 93 +++++++++++++++++++++++------------------- 1 file changed, 52 insertions(+), 41 deletions(-) diff --git a/tsdb/index/postings.go b/tsdb/index/postings.go index e6a6c708f..f8415407e 100644 --- a/tsdb/index/postings.go +++ b/tsdb/index/postings.go @@ -300,52 +300,34 @@ func (p *MemPostings) Delete(deleted map[storage.SeriesRef]struct{}, affected ma // Deleting label names mutates p.m map, so it should be done from a single goroutine after nobody else is reading it. deleteLabelNames := make(chan string, len(p.m)) - process := func(l labels.Label) { - orig := p.m[l.Name][l.Value] - repl := make([]storage.SeriesRef, 0, len(orig)) - for _, id := range orig { - if _, ok := deleted[id]; !ok { - repl = append(repl, id) + process, wait := processWithBoundedParallelismAndConsistentWorkers( + runtime.GOMAXPROCS(0), + func(l labels.Label) uint64 { return xxhash.Sum64String(l.Name) }, + func(l labels.Label) { + orig := p.m[l.Name][l.Value] + repl := make([]storage.SeriesRef, 0, len(orig)) + for _, id := range orig { + if _, ok := deleted[id]; !ok { + repl = append(repl, id) + } } - } - if len(repl) > 0 { - p.m[l.Name][l.Value] = repl - } else { - delete(p.m[l.Name], l.Value) - if len(p.m[l.Name]) == 0 { - // Delete the key if we removed all values. - deleteLabelNames <- l.Name + if len(repl) > 0 { + p.m[l.Name][l.Value] = repl + } else { + delete(p.m[l.Name], l.Value) + if len(p.m[l.Name]) == 0 { + // Delete the key if we removed all values. + deleteLabelNames <- l.Name + } } - } - } + }, + ) - // Create GOMAXPROCS workers. 
- wg := sync.WaitGroup{} - jobs := make([]chan labels.Label, runtime.GOMAXPROCS(0)) - for i := range jobs { - jobs[i] = make(chan labels.Label, 128) - wg.Add(1) - go func(jobs chan labels.Label) { - defer wg.Done() - for l := range jobs { - process(l) - } - }(jobs[i]) - } - - // Process all affected labels and the allPostingsKey. for l := range affected { - j := int(xxhash.Sum64String(l.Name) % uint64(len(jobs))) - jobs[j] <- l + process(l) } - j := int(xxhash.Sum64String(allPostingsKey.Name) % uint64(len(jobs))) - jobs[j] <- allPostingsKey - - // Close jobs channels and wait all workers to finish. - for i := range jobs { - close(jobs[i]) - } - wg.Wait() + process(allPostingsKey) + wait() // Close deleteLabelNames channel and delete the label names requested. close(deleteLabelNames) @@ -354,6 +336,35 @@ func (p *MemPostings) Delete(deleted map[storage.SeriesRef]struct{}, affected ma } } +// processWithBoundedParallelismAndConsistentWorkers will call f() with bounded parallelism, +// making sure that elements with same hash(T) will always be processed by the same worker. +// Call process() to add more jobs to process, and once finished adding, call wait() to ensure that all jobs are processed. +func processWithBoundedParallelismAndConsistentWorkers[T any](workers int, hash func(T) uint64, f func(T)) (process func(T), wait func()) { + wg := &sync.WaitGroup{} + jobs := make([]chan T, workers) + for i := 0; i < workers; i++ { + wg.Add(1) + jobs[i] = make(chan T, 128) + go func(jobs <-chan T) { + defer wg.Done() + for l := range jobs { + f(l) + } + }(jobs[i]) + } + + process = func(job T) { + jobs[hash(job)%uint64(workers)] <- job + } + wait = func() { + for i := range jobs { + close(jobs[i]) + } + wg.Wait() + } + return process, wait +} + // Iter calls f for each postings list. It aborts if f returns an error and returns it. 
func (p *MemPostings) Iter(f func(labels.Label, Postings) error) error { p.mtx.RLock() From 6bd9b1a7cc0369952da4467a4f38c4c8c6dd4629 Mon Sep 17 00:00:00 2001 From: Arthur Silva Sens Date: Fri, 19 Jul 2024 11:28:00 -0300 Subject: [PATCH 08/74] Histogram CT Zero ingestion Signed-off-by: Arthur Silva Sens --- cmd/prometheus/main.go | 4 + scrape/helpers_test.go | 20 ++- scrape/manager_test.go | 174 +++++++++++++++++++++++++++ scrape/scrape.go | 10 +- scrape/scrape_test.go | 6 +- storage/fanout.go | 14 +++ storage/interface.go | 17 ++- storage/remote/write.go | 5 + storage/remote/write_handler_test.go | 7 ++ tsdb/agent/db.go | 5 + tsdb/head_append.go | 113 ++++++++++++++++- tsdb/head_test.go | 160 ++++++++++++++++++++++++ 12 files changed, 526 insertions(+), 9 deletions(-) diff --git a/cmd/prometheus/main.go b/cmd/prometheus/main.go index e7fd82e6f..7d9106b33 100644 --- a/cmd/prometheus/main.go +++ b/cmd/prometheus/main.go @@ -1597,6 +1597,10 @@ func (n notReadyAppender) AppendHistogram(ref storage.SeriesRef, l labels.Labels return 0, tsdb.ErrNotReady } +func (n notReadyAppender) AppendHistogramCTZeroSample(ref storage.SeriesRef, l labels.Labels, t, ct int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) { + return 0, tsdb.ErrNotReady +} + func (n notReadyAppender) UpdateMetadata(ref storage.SeriesRef, l labels.Labels, m metadata.Metadata) (storage.SeriesRef, error) { return 0, tsdb.ErrNotReady } diff --git a/scrape/helpers_test.go b/scrape/helpers_test.go index 116fa5c94..4f7918f79 100644 --- a/scrape/helpers_test.go +++ b/scrape/helpers_test.go @@ -55,6 +55,10 @@ func (a nopAppender) AppendHistogram(storage.SeriesRef, labels.Labels, int64, *h return 0, nil } +func (a nopAppender) AppendHistogramCTZeroSample(ref storage.SeriesRef, l labels.Labels, t, ct int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) { + return 0, nil +} + func (a nopAppender) UpdateMetadata(storage.SeriesRef, labels.Labels, 
metadata.Metadata) (storage.SeriesRef, error) { return 0, nil } @@ -78,9 +82,10 @@ func equalFloatSamples(a, b floatSample) bool { } type histogramSample struct { - t int64 - h *histogram.Histogram - fh *histogram.FloatHistogram + metric labels.Labels + t int64 + h *histogram.Histogram + fh *histogram.FloatHistogram } type collectResultAppendable struct { @@ -146,7 +151,7 @@ func (a *collectResultAppender) AppendExemplar(ref storage.SeriesRef, l labels.L func (a *collectResultAppender) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) { a.mtx.Lock() defer a.mtx.Unlock() - a.pendingHistograms = append(a.pendingHistograms, histogramSample{h: h, fh: fh, t: t}) + a.pendingHistograms = append(a.pendingHistograms, histogramSample{h: h, fh: fh, t: t, metric: l}) if a.next == nil { return 0, nil } @@ -154,6 +159,13 @@ func (a *collectResultAppender) AppendHistogram(ref storage.SeriesRef, l labels. return a.next.AppendHistogram(ref, l, t, h, fh) } +func (a *collectResultAppender) AppendHistogramCTZeroSample(ref storage.SeriesRef, l labels.Labels, t, ct int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) { + if h != nil { + return a.AppendHistogram(ref, l, ct, &histogram.Histogram{}, nil) + } + return a.AppendHistogram(ref, l, ct, nil, &histogram.FloatHistogram{}) +} + func (a *collectResultAppender) UpdateMetadata(ref storage.SeriesRef, l labels.Labels, m metadata.Metadata) (storage.SeriesRef, error) { a.mtx.Lock() defer a.mtx.Unlock() diff --git a/scrape/manager_test.go b/scrape/manager_test.go index cd712ca62..13a369812 100644 --- a/scrape/manager_test.go +++ b/scrape/manager_test.go @@ -39,8 +39,10 @@ import ( "github.com/prometheus/prometheus/discovery" _ "github.com/prometheus/prometheus/discovery/file" "github.com/prometheus/prometheus/discovery/targetgroup" + "github.com/prometheus/prometheus/model/histogram" 
"github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/relabel" + "github.com/prometheus/prometheus/tsdb/tsdbutil" "github.com/prometheus/prometheus/util/runutil" "github.com/prometheus/prometheus/util/testutil" ) @@ -858,6 +860,178 @@ func TestManagerCTZeroIngestion(t *testing.T) { } } +// generateTestHistogram generates the same thing as tsdbutil.GenerateTestHistogram, +// but in the form of dto.Histogram. +func generateTestHistogram(i int) *dto.Histogram { + helper := tsdbutil.GenerateTestHistogram(i) + h := &dto.Histogram{} + h.SampleCount = proto.Uint64(helper.Count) + h.SampleSum = proto.Float64(helper.Sum) + h.Schema = proto.Int32(helper.Schema) + h.ZeroThreshold = proto.Float64(helper.ZeroThreshold) + h.ZeroCount = proto.Uint64(helper.ZeroCount) + h.PositiveSpan = make([]*dto.BucketSpan, len(helper.PositiveSpans)) + for i, span := range helper.PositiveSpans { + h.PositiveSpan[i] = &dto.BucketSpan{ + Offset: proto.Int32(span.Offset), + Length: proto.Uint32(span.Length), + } + } + h.PositiveDelta = helper.PositiveBuckets + h.NegativeSpan = make([]*dto.BucketSpan, len(helper.NegativeSpans)) + for i, span := range helper.NegativeSpans { + h.NegativeSpan[i] = &dto.BucketSpan{ + Offset: proto.Int32(span.Offset), + Length: proto.Uint32(span.Length), + } + } + h.NegativeDelta = helper.NegativeBuckets + return h +} + +func TestManagerCTZeroIngestionHistogram(t *testing.T) { + const mName = "expected_histogram" + + for _, tc := range []struct { + name string + inputHistSample *dto.Histogram + enableCTZeroIngestion bool + }{ + { + name: "disabled with CT on histogram", + inputHistSample: func() *dto.Histogram { + h := generateTestHistogram(0) + h.CreatedTimestamp = timestamppb.Now() + return h + }(), + enableCTZeroIngestion: false, + }, + { + name: "enabled with CT on histogram", + inputHistSample: func() *dto.Histogram { + h := generateTestHistogram(0) + h.CreatedTimestamp = timestamppb.Now() + return h + }(), + enableCTZeroIngestion: 
true, + }, + { + name: "enabled without CT on histogram", + inputHistSample: func() *dto.Histogram { + h := generateTestHistogram(0) + return h + }(), + enableCTZeroIngestion: true, + }, + } { + t.Run(tc.name, func(t *testing.T) { + app := &collectResultAppender{} + scrapeManager, err := NewManager( + &Options{ + EnableCreatedTimestampZeroIngestion: tc.enableCTZeroIngestion, + EnableNativeHistogramsIngestion: true, + skipOffsetting: true, + }, + log.NewLogfmtLogger(os.Stderr), + nil, + &collectResultAppendable{app}, + prometheus.NewRegistry(), + ) + require.NoError(t, err) + + require.NoError(t, scrapeManager.ApplyConfig(&config.Config{ + GlobalConfig: config.GlobalConfig{ + // Disable regular scrapes. + ScrapeInterval: model.Duration(9999 * time.Minute), + ScrapeTimeout: model.Duration(5 * time.Second), + // Ensure the proto is chosen. We need proto as it's the only protocol + // with the CT parsing support. + ScrapeProtocols: []config.ScrapeProtocol{config.PrometheusProto}, + }, + ScrapeConfigs: []*config.ScrapeConfig{{JobName: "test"}}, + })) + + once := sync.Once{} + // Start fake HTTP target to that allow one scrape only. + server := httptest.NewServer( + http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fail := true // TODO(bwplotka): Kill or use? + once.Do(func() { + fail = false + w.Header().Set("Content-Type", `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited`) + + ctrType := dto.MetricType_HISTOGRAM + w.Write(protoMarshalDelimited(t, &dto.MetricFamily{ + Name: proto.String(mName), + Type: &ctrType, + Metric: []*dto.Metric{{Histogram: tc.inputHistSample}}, + })) + }) + + if fail { + w.WriteHeader(http.StatusInternalServerError) + } + }), + ) + defer server.Close() + + serverURL, err := url.Parse(server.URL) + require.NoError(t, err) + + // Add fake target directly into tsets + reload. Normally users would use + // Manager.Run and wait for minimum 5s refresh interval. 
+ scrapeManager.updateTsets(map[string][]*targetgroup.Group{ + "test": {{ + Targets: []model.LabelSet{{ + model.SchemeLabel: model.LabelValue(serverURL.Scheme), + model.AddressLabel: model.LabelValue(serverURL.Host), + }}, + }}, + }) + scrapeManager.reload() + + var got []histogramSample + + // Wait for one scrape. + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute) + defer cancel() + require.NoError(t, runutil.Retry(100*time.Millisecond, ctx.Done(), func() error { + app.mtx.Lock() + defer app.mtx.Unlock() + + // Check if scrape happened and grab the relevant histograms, they have to be there - or it's a bug + // and it's not worth waiting. + for _, h := range app.resultHistograms { + if h.metric.Get(model.MetricNameLabel) == mName { + got = append(got, h) + } + } + if len(app.resultHistograms) > 0 { + return nil + } + return fmt.Errorf("expected some histogram samples, got none") + }), "after 1 minute") + scrapeManager.Stop() + + // Check for zero samples, assuming we only injected always one histogram sample. + // Did it contain CT to inject? If yes, was CT zero enabled? + if tc.inputHistSample.CreatedTimestamp.IsValid() && tc.enableCTZeroIngestion { + require.Len(t, got, 2) + // Zero sample. + require.Equal(t, histogram.Histogram{}, *got[0].h) + // Quick soft check to make sure it's the same sample or at least not zero. + require.Equal(t, tc.inputHistSample.GetSampleSum(), got[1].h.Sum) + return + } + + // Expect only one, valid sample. + require.Len(t, got, 1) + // Quick soft check to make sure it's the same sample or at least not zero. + require.Equal(t, tc.inputHistSample.GetSampleSum(), got[0].h.Sum) + }) + } +} + func TestUnregisterMetrics(t *testing.T) { reg := prometheus.NewRegistry() // Check that all metrics can be unregistered, allowing a second manager to be created. 
diff --git a/scrape/scrape.go b/scrape/scrape.go index dca4682b1..f29beeb03 100644 --- a/scrape/scrape.go +++ b/scrape/scrape.go @@ -1701,7 +1701,15 @@ loop: } else { if sl.enableCTZeroIngestion { if ctMs := p.CreatedTimestamp(); ctMs != nil { - ref, err = app.AppendCTZeroSample(ref, lset, t, *ctMs) + if isHistogram && sl.enableNativeHistogramIngestion { + if h != nil { + ref, err = app.AppendHistogramCTZeroSample(ref, lset, t, *ctMs, h, nil) + } else { + ref, err = app.AppendHistogramCTZeroSample(ref, lset, t, *ctMs, nil, fh) + } + } else { + ref, err = app.AppendCTZeroSample(ref, lset, t, *ctMs) + } if err != nil && !errors.Is(err, storage.ErrOutOfOrderCT) { // OOO is a common case, ignoring completely for now. // CT is an experimental feature. For now, we don't need to fail the // scrape on errors updating the created timestamp, log debug. diff --git a/scrape/scrape_test.go b/scrape/scrape_test.go index 9887924c3..9e49fe8ef 100644 --- a/scrape/scrape_test.go +++ b/scrape/scrape_test.go @@ -1999,7 +1999,8 @@ metric: < `, contentType: "application/vnd.google.protobuf", histograms: []histogramSample{{ - t: 1234568, + t: 1234568, + metric: labels.FromStrings("__name__", "test_histogram"), h: &histogram.Histogram{ Count: 175, ZeroCount: 2, @@ -2125,7 +2126,8 @@ metric: < {metric: labels.FromStrings("__name__", "test_histogram_bucket", "le", "+Inf"), t: 1234568, f: 175}, }, histograms: []histogramSample{{ - t: 1234568, + t: 1234568, + metric: labels.FromStrings("__name__", "test_histogram"), h: &histogram.Histogram{ Count: 175, ZeroCount: 2, diff --git a/storage/fanout.go b/storage/fanout.go index e52342bc7..80022b256 100644 --- a/storage/fanout.go +++ b/storage/fanout.go @@ -190,6 +190,20 @@ func (f *fanoutAppender) AppendHistogram(ref SeriesRef, l labels.Labels, t int64 return ref, nil } +func (f *fanoutAppender) AppendHistogramCTZeroSample(ref SeriesRef, l labels.Labels, t, ct int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (SeriesRef, error) { + ref, 
err := f.primary.AppendHistogramCTZeroSample(ref, l, t, ct, h, fh) + if err != nil { + return ref, err + } + + for _, appender := range f.secondaries { + if _, err := appender.AppendHistogramCTZeroSample(ref, l, t, ct, h, fh); err != nil { + return 0, err + } + } + return ref, nil +} + func (f *fanoutAppender) UpdateMetadata(ref SeriesRef, l labels.Labels, m metadata.Metadata) (SeriesRef, error) { ref, err := f.primary.UpdateMetadata(ref, l, m) if err != nil { diff --git a/storage/interface.go b/storage/interface.go index 9654c8833..7ac93129e 100644 --- a/storage/interface.go +++ b/storage/interface.go @@ -50,7 +50,8 @@ var ( // NOTE(bwplotka): This can be both an instrumentation failure or commonly expected // behaviour, and we currently don't have a way to determine this. As a result // it's recommended to ignore this error for now. - ErrOutOfOrderCT = fmt.Errorf("created timestamp out of order, ignoring") + ErrOutOfOrderCT = fmt.Errorf("created timestamp out of order, ignoring") + ErrCTNewerThanSample = fmt.Errorf("CT is newer or the same as sample's timestamp, ignoring") ) // SeriesRef is a generic series reference. In prometheus it is either a @@ -313,6 +314,20 @@ type HistogramAppender interface { // pointer. AppendHistogram won't mutate the histogram, but in turn // depends on the caller to not mutate it either. AppendHistogram(ref SeriesRef, l labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (SeriesRef, error) + // AppendHistogramCTZeroSample adds synthetic zero sample for the given ct timestamp, + // which will be associated with given series, labels and the incoming + // sample's t (timestamp). AppendHistogramCTZeroSample returns error if zero sample can't be + // appended, for example when ct is too old, or when it would collide with + // incoming sample (sample has priority). + // + // AppendHistogramCTZeroSample has to be called before the corresponding histogram AppendHistogram. 
+ // A series reference number is returned which can be used to modify the + // CT for the given series in the same or later transactions. + // Returned reference numbers are ephemeral and may be rejected in calls + // to AppendHistogramCTZeroSample() at any point. + // + // If the reference is 0 it must not be used for caching. + AppendHistogramCTZeroSample(ref SeriesRef, l labels.Labels, t, ct int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (SeriesRef, error) } // MetadataUpdater provides an interface for associating metadata to stored series. diff --git a/storage/remote/write.go b/storage/remote/write.go index eba429084..624732c4f 100644 --- a/storage/remote/write.go +++ b/storage/remote/write.go @@ -306,6 +306,11 @@ func (t *timestampTracker) AppendHistogram(_ storage.SeriesRef, _ labels.Labels, return 0, nil } +func (t *timestampTracker) AppendHistogramCTZeroSample(_ storage.SeriesRef, _ labels.Labels, _, _ int64, _ *histogram.Histogram, _ *histogram.FloatHistogram) (storage.SeriesRef, error) { + // TODO: Implement + return 0, nil +} + func (t *timestampTracker) UpdateMetadata(_ storage.SeriesRef, _ labels.Labels, _ metadata.Metadata) (storage.SeriesRef, error) { // TODO: Add and increment a `metadata` field when we get around to wiring metadata in remote_write. // UpdateMetadata is no-op for remote write (where timestampTracker is being used) for now. diff --git a/storage/remote/write_handler_test.go b/storage/remote/write_handler_test.go index 5c89a1ab9..8e628f40d 100644 --- a/storage/remote/write_handler_test.go +++ b/storage/remote/write_handler_test.go @@ -915,6 +915,13 @@ func (m *mockAppendable) AppendHistogram(_ storage.SeriesRef, l labels.Labels, t return 0, nil } +func (m *mockAppendable) AppendHistogramCTZeroSample(ref storage.SeriesRef, l labels.Labels, t, ct int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) { + // AppendCTZeroSample is no-op for remote-write for now. 
+ // TODO(bwplotka/arthursens): Add support for PRW 2.0 for CT zero feature (but also we might + // replace this with in-metadata CT storage, see https://github.com/prometheus/prometheus/issues/14218). + return 0, nil +} + func (m *mockAppendable) UpdateMetadata(_ storage.SeriesRef, l labels.Labels, mp metadata.Metadata) (storage.SeriesRef, error) { if m.updateMetadataErr != nil { return 0, m.updateMetadataErr diff --git a/tsdb/agent/db.go b/tsdb/agent/db.go index 596d5c8a3..5e33fce80 100644 --- a/tsdb/agent/db.go +++ b/tsdb/agent/db.go @@ -972,6 +972,11 @@ func (a *appender) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t int return storage.SeriesRef(series.ref), nil } +func (a *appender) AppendHistogramCTZeroSample(ref storage.SeriesRef, l labels.Labels, t, ct int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) { + // TODO(bwplotka/arthursens): Wire metadata in the Agent's appender. + return 0, nil +} + func (a *appender) UpdateMetadata(storage.SeriesRef, labels.Labels, metadata.Metadata) (storage.SeriesRef, error) { // TODO: Wire metadata in the Agent's appender. 
return 0, nil diff --git a/tsdb/head_append.go b/tsdb/head_append.go index 3dd9a367b..10fb17809 100644 --- a/tsdb/head_append.go +++ b/tsdb/head_append.go @@ -79,6 +79,16 @@ func (a *initAppender) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t return a.app.AppendHistogram(ref, l, t, h, fh) } +func (a *initAppender) AppendHistogramCTZeroSample(ref storage.SeriesRef, l labels.Labels, t, ct int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) { + if a.app != nil { + return a.app.AppendHistogramCTZeroSample(ref, l, t, ct, h, fh) + } + a.head.initTime(t) + a.app = a.head.appender() + + return a.app.AppendHistogramCTZeroSample(ref, l, t, ct, h, fh) +} + func (a *initAppender) UpdateMetadata(ref storage.SeriesRef, l labels.Labels, m metadata.Metadata) (storage.SeriesRef, error) { if a.app != nil { return a.app.UpdateMetadata(ref, l, m) @@ -388,7 +398,7 @@ func (a *headAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64 // storage.CreatedTimestampAppender.AppendCTZeroSample for further documentation. 
func (a *headAppender) AppendCTZeroSample(ref storage.SeriesRef, lset labels.Labels, t, ct int64) (storage.SeriesRef, error) { if ct >= t { - return 0, fmt.Errorf("CT is newer or the same as sample's timestamp, ignoring") + return 0, storage.ErrCTNewerThanSample } s := a.head.series.getByID(chunks.HeadSeriesRef(ref)) @@ -747,6 +757,107 @@ func (a *headAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels return storage.SeriesRef(s.ref), nil } +func (a *headAppender) AppendHistogramCTZeroSample(ref storage.SeriesRef, lset labels.Labels, t, ct int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) { + if !a.head.opts.EnableNativeHistograms.Load() { + return 0, storage.ErrNativeHistogramsDisabled + } + + if ct >= t { + return 0, storage.ErrCTNewerThanSample + } + s := a.head.series.getByID(chunks.HeadSeriesRef(ref)) + if s == nil { + // Ensure no empty labels have gotten through. + lset = lset.WithoutEmpty() + if lset.IsEmpty() { + return 0, fmt.Errorf("empty labelset: %w", ErrInvalidSample) + } + + if l, dup := lset.HasDuplicateLabelNames(); dup { + return 0, fmt.Errorf(`label name "%s" is not unique: %w`, l, ErrInvalidSample) + } + + var created bool + var err error + s, created, err = a.head.getOrCreate(lset.Hash(), lset) + if err != nil { + return 0, err + } + if created { + switch { + case h != nil: + s.lastHistogramValue = &histogram.Histogram{} + case fh != nil: + s.lastFloatHistogramValue = &histogram.FloatHistogram{} + } + a.series = append(a.series, record.RefSeries{ + Ref: s.ref, + Labels: lset, + }) + } + } + + switch { + case h != nil: + zeroHistogram := &histogram.Histogram{} + s.Lock() + // Although we call `appendableHistogram` with oooHistogramsEnabled=true, for CTZeroSamples OOO is not allowed. + // We set it to true to make this implementation as close as possible to the float implementation. 
+ isOOO, _, err := s.appendableHistogram(ct, zeroHistogram, a.headMaxt, a.minValidTime, a.oooTimeWindow, true) + if err != nil { + s.Unlock() + if errors.Is(err, storage.ErrOutOfOrderSample) { + return 0, storage.ErrOutOfOrderCT + } + } + // OOO is not allowed because after the first scrape, CT will be the same for most (if not all) future samples. + // This is to prevent the injected zero from being marked as OOO forever. + if isOOO { + s.Unlock() + return 0, storage.ErrOutOfOrderCT + } + s.pendingCommit = true + s.Unlock() + a.histograms = append(a.histograms, record.RefHistogramSample{ + Ref: s.ref, + T: ct, + H: zeroHistogram, + }) + a.histogramSeries = append(a.histogramSeries, s) + case fh != nil: + zeroFloatHistogram := &histogram.FloatHistogram{} + s.Lock() + // Although we call `appendableFloatHistogram` with oooHistogramsEnabled=true, for CTZeroSamples OOO is not allowed. + // We set it to true to make this implementation as close as possible to the float implementation. + isOOO, _, err := s.appendableFloatHistogram(ct, zeroFloatHistogram, a.headMaxt, a.minValidTime, a.oooTimeWindow, true) // OOO is not allowed for CTZeroSamples. + if err != nil { + s.Unlock() + if errors.Is(err, storage.ErrOutOfOrderSample) { + return 0, storage.ErrOutOfOrderCT + } + } + // OOO is not allowed because after the first scrape, CT will be the same for most (if not all) future samples. + // This is to prevent the injected zero from being marked as OOO forever. 
+ if isOOO { + s.Unlock() + return 0, storage.ErrOutOfOrderCT + } + s.pendingCommit = true + s.Unlock() + a.floatHistograms = append(a.floatHistograms, record.RefFloatHistogramSample{ + Ref: s.ref, + T: ct, + FH: zeroFloatHistogram, + }) + a.floatHistogramSeries = append(a.floatHistogramSeries, s) + } + + if ct > a.maxt { + a.maxt = ct + } + return storage.SeriesRef(s.ref), nil +} + // UpdateMetadata for headAppender assumes the series ref already exists, and so it doesn't // use getOrCreate or make any of the lset sanity checks that Append does. func (a *headAppender) UpdateMetadata(ref storage.SeriesRef, lset labels.Labels, meta metadata.Metadata) (storage.SeriesRef, error) { diff --git a/tsdb/head_test.go b/tsdb/head_test.go index 483121dc6..ebfd1ff8b 100644 --- a/tsdb/head_test.go +++ b/tsdb/head_test.go @@ -6363,6 +6363,166 @@ func TestHeadAppender_AppendCTZeroSample(t *testing.T) { } } +func TestHeadAppender_AppendHistogramCTZeroSample(t *testing.T) { + testHistogram := tsdbutil.GenerateTestHistogram(1) + testFloatHistogram := tsdbutil.GenerateTestFloatHistogram(1) + lbls := labels.FromStrings("foo", "bar") + type appendableHistograms struct { + ts int64 + h *histogram.Histogram + fh *histogram.FloatHistogram + ct int64 + } + for _, tc := range []struct { + name string + appendableHistograms []appendableHistograms + expectedHistograms []chunks.Sample + }{ + { + name: "In order ct+normal sample/histogram", + appendableHistograms: []appendableHistograms{ + {ts: 100, h: testHistogram, ct: 1}, + {ts: 101, h: testHistogram, ct: 1}, + }, + expectedHistograms: func() []chunks.Sample { + hNoCounterReset := *testHistogram + hNoCounterReset.CounterResetHint = histogram.NotCounterReset + return []chunks.Sample{ + sample{t: 1, h: &histogram.Histogram{}}, + sample{t: 100, h: testHistogram}, + sample{t: 101, h: &hNoCounterReset}, + } + }(), + }, + { + name: "In order ct+normal sample/floathistogram", + appendableHistograms: []appendableHistograms{ + {ts: 100, fh: 
testFloatHistogram, ct: 1}, + {ts: 101, fh: testFloatHistogram, ct: 1}, + }, + expectedHistograms: func() []chunks.Sample { + fhNoCounterReset := *testFloatHistogram + fhNoCounterReset.CounterResetHint = histogram.NotCounterReset + return []chunks.Sample{ + sample{t: 1, fh: &histogram.FloatHistogram{}}, + sample{t: 100, fh: testFloatHistogram}, + sample{t: 101, fh: &fhNoCounterReset}, + } + }(), + }, + { + name: "Consecutive appends with same ct ignore ct/histogram", + appendableHistograms: []appendableHistograms{ + {ts: 100, h: testHistogram, ct: 1}, + {ts: 101, h: testHistogram, ct: 1}, + }, + expectedHistograms: func() []chunks.Sample { + hNoCounterReset := *testHistogram + hNoCounterReset.CounterResetHint = histogram.NotCounterReset + return []chunks.Sample{ + sample{t: 1, h: &histogram.Histogram{}}, + sample{t: 100, h: testHistogram}, + sample{t: 101, h: &hNoCounterReset}, + } + }(), + }, + { + name: "Consecutive appends with same ct ignore ct/floathistogram", + appendableHistograms: []appendableHistograms{ + {ts: 100, fh: testFloatHistogram, ct: 1}, + {ts: 101, fh: testFloatHistogram, ct: 1}, + }, + expectedHistograms: func() []chunks.Sample { + fhNoCounterReset := *testFloatHistogram + fhNoCounterReset.CounterResetHint = histogram.NotCounterReset + return []chunks.Sample{ + sample{t: 1, fh: &histogram.FloatHistogram{}}, + sample{t: 100, fh: testFloatHistogram}, + sample{t: 101, fh: &fhNoCounterReset}, + } + }(), + }, + { + name: "Consecutive appends with newer ct do not ignore ct/histogram", + appendableHistograms: []appendableHistograms{ + {ts: 100, h: testHistogram, ct: 1}, + {ts: 102, h: testHistogram, ct: 101}, + }, + expectedHistograms: []chunks.Sample{ + sample{t: 1, h: &histogram.Histogram{}}, + sample{t: 100, h: testHistogram}, + sample{t: 101, h: &histogram.Histogram{CounterResetHint: histogram.CounterReset}}, + sample{t: 102, h: testHistogram}, + }, + }, + { + name: "Consecutive appends with newer ct do not ignore ct/floathistogram", + 
appendableHistograms: []appendableHistograms{ + {ts: 100, fh: testFloatHistogram, ct: 1}, + {ts: 102, fh: testFloatHistogram, ct: 101}, + }, + expectedHistograms: []chunks.Sample{ + sample{t: 1, fh: &histogram.FloatHistogram{}}, + sample{t: 100, fh: testFloatHistogram}, + sample{t: 101, fh: &histogram.FloatHistogram{CounterResetHint: histogram.CounterReset}}, + sample{t: 102, fh: testFloatHistogram}, + }, + }, + { + name: "CT equals to previous sample timestamp is ignored/histogram", + appendableHistograms: []appendableHistograms{ + {ts: 100, h: testHistogram, ct: 1}, + {ts: 101, h: testHistogram, ct: 100}, + }, + expectedHistograms: func() []chunks.Sample { + hNoCounterReset := *testHistogram + hNoCounterReset.CounterResetHint = histogram.NotCounterReset + return []chunks.Sample{ + sample{t: 1, h: &histogram.Histogram{}}, + sample{t: 100, h: testHistogram}, + sample{t: 101, h: &hNoCounterReset}, + } + }(), + }, + { + name: "CT equals to previous sample timestamp is ignored/floathistogram", + appendableHistograms: []appendableHistograms{ + {ts: 100, fh: testFloatHistogram, ct: 1}, + {ts: 101, fh: testFloatHistogram, ct: 100}, + }, + expectedHistograms: func() []chunks.Sample { + fhNoCounterReset := *testFloatHistogram + fhNoCounterReset.CounterResetHint = histogram.NotCounterReset + return []chunks.Sample{ + sample{t: 1, fh: &histogram.FloatHistogram{}}, + sample{t: 100, fh: testFloatHistogram}, + sample{t: 101, fh: &fhNoCounterReset}, + } + }(), + }, + } { + t.Run(tc.name, func(t *testing.T) { + head, _ := newTestHead(t, DefaultBlockDuration, wlog.CompressionNone, false) + defer func() { + require.NoError(t, head.Close()) + }() + appender := head.Appender(context.Background()) + for _, sample := range tc.appendableHistograms { + ref, err := appender.AppendHistogramCTZeroSample(0, lbls, sample.ts, sample.ct, sample.h, sample.fh) + require.NoError(t, err) + _, err = appender.AppendHistogram(ref, lbls, sample.ts, sample.h, sample.fh) + require.NoError(t, err) + } + 
require.NoError(t, appender.Commit()) + + q, err := NewBlockQuerier(head, math.MinInt64, math.MaxInt64) + require.NoError(t, err) + result := query(t, q, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar")) + require.Equal(t, tc.expectedHistograms, result[`{foo="bar"}`]) + }) + } +} + func TestHeadCompactableDoesNotCompactEmptyHead(t *testing.T) { // Use a chunk range of 1 here so that if we attempted to determine if the head // was compactable using default values for min and max times, `Head.compactable()` From 95a53ef982135de8faf9ba2757143e31dc0490f4 Mon Sep 17 00:00:00 2001 From: Arthur Silva Sens Date: Wed, 25 Sep 2024 16:17:20 -0300 Subject: [PATCH 09/74] Join tests for appending float and histogram CTs Signed-off-by: Arthur Silva Sens --- tsdb/head_test.go | 197 +++++++++++++++++++++------------------------- 1 file changed, 88 insertions(+), 109 deletions(-) diff --git a/tsdb/head_test.go b/tsdb/head_test.go index ebfd1ff8b..8c401bc6f 100644 --- a/tsdb/head_test.go +++ b/tsdb/head_test.go @@ -6281,11 +6281,15 @@ func TestHeadAppender_AppendFloatWithSameTimestampAsPreviousHistogram(t *testing require.ErrorIs(t, err, storage.NewDuplicateHistogramToFloatErr(2_000, 10.0)) } -func TestHeadAppender_AppendCTZeroSample(t *testing.T) { +func TestHeadAppender_AppendCT(t *testing.T) { + testHistogram := tsdbutil.GenerateTestHistogram(1) + testFloatHistogram := tsdbutil.GenerateTestFloatHistogram(1) type appendableSamples struct { - ts int64 - val float64 - ct int64 + ts int64 + fSample float64 + h *histogram.Histogram + fh *histogram.FloatHistogram + ct int64 } for _, tc := range []struct { name string @@ -6293,20 +6297,10 @@ func TestHeadAppender_AppendCTZeroSample(t *testing.T) { expectedSamples []chunks.Sample }{ { - name: "In order ct+normal sample", + name: "In order ct+normal sample/floatSample", appendableSamples: []appendableSamples{ - {ts: 100, val: 10, ct: 1}, - }, - expectedSamples: []chunks.Sample{ - sample{t: 1, f: 0}, - sample{t: 100, f: 10}, - }, - }, 
- { - name: "Consecutive appends with same ct ignore ct", - appendableSamples: []appendableSamples{ - {ts: 100, val: 10, ct: 1}, - {ts: 101, val: 10, ct: 1}, + {ts: 100, fSample: 10, ct: 1}, + {ts: 101, fSample: 10, ct: 1}, }, expectedSamples: []chunks.Sample{ sample{t: 1, f: 0}, @@ -6314,77 +6308,13 @@ func TestHeadAppender_AppendCTZeroSample(t *testing.T) { sample{t: 101, f: 10}, }, }, - { - name: "Consecutive appends with newer ct do not ignore ct", - appendableSamples: []appendableSamples{ - {ts: 100, val: 10, ct: 1}, - {ts: 102, val: 10, ct: 101}, - }, - expectedSamples: []chunks.Sample{ - sample{t: 1, f: 0}, - sample{t: 100, f: 10}, - sample{t: 101, f: 0}, - sample{t: 102, f: 10}, - }, - }, - { - name: "CT equals to previous sample timestamp is ignored", - appendableSamples: []appendableSamples{ - {ts: 100, val: 10, ct: 1}, - {ts: 101, val: 10, ct: 100}, - }, - expectedSamples: []chunks.Sample{ - sample{t: 1, f: 0}, - sample{t: 100, f: 10}, - sample{t: 101, f: 10}, - }, - }, - } { - t.Run(tc.name, func(t *testing.T) { - h, _ := newTestHead(t, DefaultBlockDuration, wlog.CompressionNone, false) - defer func() { - require.NoError(t, h.Close()) - }() - a := h.Appender(context.Background()) - lbls := labels.FromStrings("foo", "bar") - for _, sample := range tc.appendableSamples { - _, err := a.AppendCTZeroSample(0, lbls, sample.ts, sample.ct) - require.NoError(t, err) - _, err = a.Append(0, lbls, sample.ts, sample.val) - require.NoError(t, err) - } - require.NoError(t, a.Commit()) - - q, err := NewBlockQuerier(h, math.MinInt64, math.MaxInt64) - require.NoError(t, err) - result := query(t, q, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar")) - require.Equal(t, tc.expectedSamples, result[`{foo="bar"}`]) - }) - } -} - -func TestHeadAppender_AppendHistogramCTZeroSample(t *testing.T) { - testHistogram := tsdbutil.GenerateTestHistogram(1) - testFloatHistogram := tsdbutil.GenerateTestFloatHistogram(1) - lbls := labels.FromStrings("foo", "bar") - type 
appendableHistograms struct { - ts int64 - h *histogram.Histogram - fh *histogram.FloatHistogram - ct int64 - } - for _, tc := range []struct { - name string - appendableHistograms []appendableHistograms - expectedHistograms []chunks.Sample - }{ { name: "In order ct+normal sample/histogram", - appendableHistograms: []appendableHistograms{ + appendableSamples: []appendableSamples{ {ts: 100, h: testHistogram, ct: 1}, {ts: 101, h: testHistogram, ct: 1}, }, - expectedHistograms: func() []chunks.Sample { + expectedSamples: func() []chunks.Sample { hNoCounterReset := *testHistogram hNoCounterReset.CounterResetHint = histogram.NotCounterReset return []chunks.Sample{ @@ -6396,11 +6326,11 @@ func TestHeadAppender_AppendHistogramCTZeroSample(t *testing.T) { }, { name: "In order ct+normal sample/floathistogram", - appendableHistograms: []appendableHistograms{ + appendableSamples: []appendableSamples{ {ts: 100, fh: testFloatHistogram, ct: 1}, {ts: 101, fh: testFloatHistogram, ct: 1}, }, - expectedHistograms: func() []chunks.Sample { + expectedSamples: func() []chunks.Sample { fhNoCounterReset := *testFloatHistogram fhNoCounterReset.CounterResetHint = histogram.NotCounterReset return []chunks.Sample{ @@ -6410,13 +6340,25 @@ func TestHeadAppender_AppendHistogramCTZeroSample(t *testing.T) { } }(), }, + { + name: "Consecutive appends with same ct ignore ct/floatSample", + appendableSamples: []appendableSamples{ + {ts: 100, fSample: 10, ct: 1}, + {ts: 101, fSample: 10, ct: 1}, + }, + expectedSamples: []chunks.Sample{ + sample{t: 1, f: 0}, + sample{t: 100, f: 10}, + sample{t: 101, f: 10}, + }, + }, { name: "Consecutive appends with same ct ignore ct/histogram", - appendableHistograms: []appendableHistograms{ + appendableSamples: []appendableSamples{ {ts: 100, h: testHistogram, ct: 1}, {ts: 101, h: testHistogram, ct: 1}, }, - expectedHistograms: func() []chunks.Sample { + expectedSamples: func() []chunks.Sample { hNoCounterReset := *testHistogram hNoCounterReset.CounterResetHint = 
histogram.NotCounterReset return []chunks.Sample{ @@ -6428,11 +6370,11 @@ func TestHeadAppender_AppendHistogramCTZeroSample(t *testing.T) { }, { name: "Consecutive appends with same ct ignore ct/floathistogram", - appendableHistograms: []appendableHistograms{ + appendableSamples: []appendableSamples{ {ts: 100, fh: testFloatHistogram, ct: 1}, {ts: 101, fh: testFloatHistogram, ct: 1}, }, - expectedHistograms: func() []chunks.Sample { + expectedSamples: func() []chunks.Sample { fhNoCounterReset := *testFloatHistogram fhNoCounterReset.CounterResetHint = histogram.NotCounterReset return []chunks.Sample{ @@ -6442,13 +6384,26 @@ func TestHeadAppender_AppendHistogramCTZeroSample(t *testing.T) { } }(), }, + { + name: "Consecutive appends with newer ct do not ignore ct/floatSample", + appendableSamples: []appendableSamples{ + {ts: 100, fSample: 10, ct: 1}, + {ts: 102, fSample: 10, ct: 101}, + }, + expectedSamples: []chunks.Sample{ + sample{t: 1, f: 0}, + sample{t: 100, f: 10}, + sample{t: 101, f: 0}, + sample{t: 102, f: 10}, + }, + }, { name: "Consecutive appends with newer ct do not ignore ct/histogram", - appendableHistograms: []appendableHistograms{ + appendableSamples: []appendableSamples{ {ts: 100, h: testHistogram, ct: 1}, {ts: 102, h: testHistogram, ct: 101}, }, - expectedHistograms: []chunks.Sample{ + expectedSamples: []chunks.Sample{ sample{t: 1, h: &histogram.Histogram{}}, sample{t: 100, h: testHistogram}, sample{t: 101, h: &histogram.Histogram{CounterResetHint: histogram.CounterReset}}, @@ -6457,24 +6412,36 @@ func TestHeadAppender_AppendHistogramCTZeroSample(t *testing.T) { }, { name: "Consecutive appends with newer ct do not ignore ct/floathistogram", - appendableHistograms: []appendableHistograms{ + appendableSamples: []appendableSamples{ {ts: 100, fh: testFloatHistogram, ct: 1}, {ts: 102, fh: testFloatHistogram, ct: 101}, }, - expectedHistograms: []chunks.Sample{ + expectedSamples: []chunks.Sample{ sample{t: 1, fh: &histogram.FloatHistogram{}}, sample{t: 100, 
fh: testFloatHistogram}, sample{t: 101, fh: &histogram.FloatHistogram{CounterResetHint: histogram.CounterReset}}, sample{t: 102, fh: testFloatHistogram}, }, }, + { + name: "CT equals to previous sample timestamp is ignored/floatSample", + appendableSamples: []appendableSamples{ + {ts: 100, fSample: 10, ct: 1}, + {ts: 101, fSample: 10, ct: 100}, + }, + expectedSamples: []chunks.Sample{ + sample{t: 1, f: 0}, + sample{t: 100, f: 10}, + sample{t: 101, f: 10}, + }, + }, { name: "CT equals to previous sample timestamp is ignored/histogram", - appendableHistograms: []appendableHistograms{ + appendableSamples: []appendableSamples{ {ts: 100, h: testHistogram, ct: 1}, {ts: 101, h: testHistogram, ct: 100}, }, - expectedHistograms: func() []chunks.Sample { + expectedSamples: func() []chunks.Sample { hNoCounterReset := *testHistogram hNoCounterReset.CounterResetHint = histogram.NotCounterReset return []chunks.Sample{ @@ -6486,11 +6453,11 @@ func TestHeadAppender_AppendHistogramCTZeroSample(t *testing.T) { }, { name: "CT equals to previous sample timestamp is ignored/floathistogram", - appendableHistograms: []appendableHistograms{ + appendableSamples: []appendableSamples{ {ts: 100, fh: testFloatHistogram, ct: 1}, {ts: 101, fh: testFloatHistogram, ct: 100}, }, - expectedHistograms: func() []chunks.Sample { + expectedSamples: func() []chunks.Sample { fhNoCounterReset := *testFloatHistogram fhNoCounterReset.CounterResetHint = histogram.NotCounterReset return []chunks.Sample{ @@ -6502,23 +6469,35 @@ func TestHeadAppender_AppendHistogramCTZeroSample(t *testing.T) { }, } { t.Run(tc.name, func(t *testing.T) { - head, _ := newTestHead(t, DefaultBlockDuration, wlog.CompressionNone, false) + h, _ := newTestHead(t, DefaultBlockDuration, wlog.CompressionNone, false) defer func() { - require.NoError(t, head.Close()) + require.NoError(t, h.Close()) }() - appender := head.Appender(context.Background()) - for _, sample := range tc.appendableHistograms { - ref, err := 
appender.AppendHistogramCTZeroSample(0, lbls, sample.ts, sample.ct, sample.h, sample.fh) - require.NoError(t, err) - _, err = appender.AppendHistogram(ref, lbls, sample.ts, sample.h, sample.fh) - require.NoError(t, err) - } - require.NoError(t, appender.Commit()) + a := h.Appender(context.Background()) + lbls := labels.FromStrings("foo", "bar") + for _, sample := range tc.appendableSamples { + // Append float if it's a float test case + if sample.fSample != 0 { + _, err := a.AppendCTZeroSample(0, lbls, sample.ts, sample.ct) + require.NoError(t, err) + _, err = a.Append(0, lbls, sample.ts, sample.fSample) + require.NoError(t, err) + } - q, err := NewBlockQuerier(head, math.MinInt64, math.MaxInt64) + // Append histograms if it's a histogram test case + if sample.h != nil || sample.fh != nil { + ref, err := a.AppendHistogramCTZeroSample(0, lbls, sample.ts, sample.ct, sample.h, sample.fh) + require.NoError(t, err) + _, err = a.AppendHistogram(ref, lbls, sample.ts, sample.h, sample.fh) + require.NoError(t, err) + } + } + require.NoError(t, a.Commit()) + + q, err := NewBlockQuerier(h, math.MinInt64, math.MaxInt64) require.NoError(t, err) result := query(t, q, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar")) - require.Equal(t, tc.expectedHistograms, result[`{foo="bar"}`]) + require.Equal(t, tc.expectedSamples, result[`{foo="bar"}`]) }) } } From 5710ddf24fd9a15710b391400aae4e4cc426e97a Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Thu, 26 Sep 2024 15:32:18 +0100 Subject: [PATCH 10/74] [ENHANCEMENT] Alerts: remove metrics for removed Alertmanagers (#13909) * [ENHANCEMENT] Alerts: remove metrics for removed Alertmanagers So they don't continue to report stale values. 
Signed-off-by: Bryan Boreham --- notifier/notifier.go | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/notifier/notifier.go b/notifier/notifier.go index 218e4cb8c..5374e73d6 100644 --- a/notifier/notifier.go +++ b/notifier/notifier.go @@ -770,6 +770,7 @@ func (s *alertmanagerSet) sync(tgs []*targetgroup.Group) { s.mtx.Lock() defer s.mtx.Unlock() + previousAms := s.ams // Set new Alertmanagers and deduplicate them along their unique URL. s.ams = []alertmanager{} s.droppedAms = []alertmanager{} @@ -789,6 +790,17 @@ func (s *alertmanagerSet) sync(tgs []*targetgroup.Group) { seen[us] = struct{}{} s.ams = append(s.ams, am) } + // Now remove counters for any removed Alertmanagers. + for _, am := range previousAms { + us := am.url().String() + if _, ok := seen[us]; ok { + continue + } + s.metrics.latency.DeleteLabelValues(us) + s.metrics.sent.DeleteLabelValues(us) + s.metrics.errors.DeleteLabelValues(us) + seen[us] = struct{}{} + } } func postPath(pre string, v config.AlertmanagerAPIVersion) string { From 79a6238e195ecc1c20937036c1e3b4e3bdaddc49 Mon Sep 17 00:00:00 2001 From: George Krajcsovits Date: Thu, 26 Sep 2024 18:35:15 +0200 Subject: [PATCH 11/74] scrape/scrape_test.go: reduce the time it takes to reload the manager (#14447) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * scrape/scrape_test.go: reduce the time it takes to reload the manager TestNativeHistogramMaxSchemaSet took over 3x5s to complete because there's a minimum reload interval. I've made the testcases run in parallel and reduced the reload interval to 10ms. Now the test runs in around 0.1-0.2 seconds. Ran test 10000 times to check if it's flaky. 
Signed-off-by: György Krajcsovits --------- Signed-off-by: György Krajcsovits --- scrape/scrape_test.go | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/scrape/scrape_test.go b/scrape/scrape_test.go index 9e49fe8ef..04fd53601 100644 --- a/scrape/scrape_test.go +++ b/scrape/scrape_test.go @@ -3773,7 +3773,9 @@ func TestNativeHistogramMaxSchemaSet(t *testing.T) { }, } for name, tc := range testcases { + tc := tc t.Run(name, func(t *testing.T) { + t.Parallel() testNativeHistogramMaxSchemaSet(t, tc.minBucketFactor, tc.expectedSchema) }) } @@ -3816,8 +3818,8 @@ func testNativeHistogramMaxSchemaSet(t *testing.T, minBucketFactor string, expec configStr := fmt.Sprintf(` global: metric_name_validation_scheme: legacy - scrape_interval: 1s - scrape_timeout: 1s + scrape_interval: 50ms + scrape_timeout: 25ms scrape_configs: - job_name: test %s @@ -3830,7 +3832,7 @@ scrape_configs: s.DB.EnableNativeHistograms() reg := prometheus.NewRegistry() - mng, err := NewManager(&Options{EnableNativeHistogramsIngestion: true}, nil, nil, s, reg) + mng, err := NewManager(&Options{DiscoveryReloadInterval: model.Duration(10 * time.Millisecond), EnableNativeHistogramsIngestion: true}, nil, nil, s, reg) require.NoError(t, err) cfg, err := config.Load(configStr, false, log.NewNopLogger()) require.NoError(t, err) @@ -3861,7 +3863,7 @@ scrape_configs: countSeries++ } return countSeries > 0 - }, 15*time.Second, 100*time.Millisecond) + }, 5*time.Second, 100*time.Millisecond) // Check that native histogram schema is as expected. 
q, err := s.Querier(0, math.MaxInt64) From 8aef821e1002a3c4425f7c850b2ae9e69e28bb1e Mon Sep 17 00:00:00 2001 From: Neeraj Gartia Date: Fri, 27 Sep 2024 01:10:00 +0530 Subject: [PATCH 12/74] eval_info command Signed-off-by: Neeraj Gartia --- promql/promqltest/test.go | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/promql/promqltest/test.go b/promql/promqltest/test.go index f0649a77a..642b47444 100644 --- a/promql/promqltest/test.go +++ b/promql/promqltest/test.go @@ -46,8 +46,8 @@ import ( var ( patSpace = regexp.MustCompile("[\t ]+") patLoad = regexp.MustCompile(`^load(?:_(with_nhcb))?\s+(.+?)$`) - patEvalInstant = regexp.MustCompile(`^eval(?:_(fail|warn|ordered))?\s+instant\s+(?:at\s+(.+?))?\s+(.+)$`) - patEvalRange = regexp.MustCompile(`^eval(?:_(fail|warn))?\s+range\s+from\s+(.+)\s+to\s+(.+)\s+step\s+(.+?)\s+(.+)$`) + patEvalInstant = regexp.MustCompile(`^eval(?:_(fail|warn|ordered|info))?\s+instant\s+(?:at\s+(.+?))?\s+(.+)$`) + patEvalRange = regexp.MustCompile(`^eval(?:_(fail|warn|info))?\s+range\s+from\s+(.+)\s+to\s+(.+)\s+step\s+(.+?)\s+(.+)$`) ) const ( @@ -321,6 +321,8 @@ func (t *test) parseEval(lines []string, i int) (int, *evalCmd, error) { cmd.fail = true case "warn": cmd.warn = true + case "info": + cmd.info = true } for j := 1; i+1 < len(lines); j++ { @@ -657,10 +659,10 @@ type evalCmd struct { step time.Duration line int - isRange bool // if false, instant query - fail, warn, ordered bool - expectedFailMessage string - expectedFailRegexp *regexp.Regexp + isRange bool // if false, instant query + fail, warn, ordered, info bool + expectedFailMessage string + expectedFailRegexp *regexp.Regexp metrics map[uint64]labels.Labels expectScalar bool @@ -1208,13 +1210,16 @@ func (t *test) runInstantQuery(iq atModifierTestCase, cmd *evalCmd, engine promq if res.Err == nil && cmd.fail { return fmt.Errorf("expected error evaluating query %q (line %d) but got none", iq.expr, cmd.line) } - countWarnings, _ := 
res.Warnings.CountWarningsAndInfo() + countWarnings, countInfo := res.Warnings.CountWarningsAndInfo() if !cmd.warn && countWarnings > 0 { return fmt.Errorf("unexpected warnings evaluating query %q (line %d): %v", iq.expr, cmd.line, res.Warnings) } if cmd.warn && countWarnings == 0 { return fmt.Errorf("expected warnings evaluating query %q (line %d) but got none", iq.expr, cmd.line) } + if cmd.info && countInfo == 0 { + return fmt.Errorf("expected info annotations evaluating query %q (line %d) but got none", iq.expr, cmd.line) + } err = cmd.compareResult(res.Value) if err != nil { return fmt.Errorf("error in %s %s (line %d): %w", cmd, iq.expr, cmd.line, err) From 410fcce6f0484d59184e5c5c0dd13ea454cd2f9e Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Fri, 27 Sep 2024 07:45:49 +0100 Subject: [PATCH 13/74] Remove unnecessary pprof import (#14988) The pattern of `import _ "net/http/pprof"` adds handlers to the default http handler, but Prometheus does not use that. There are explicit handlers in `web/web.go`. So, we can remove this line with no impact to behaviour. Signed-off-by: Bryan Boreham --- cmd/prometheus/main.go | 1 - 1 file changed, 1 deletion(-) diff --git a/cmd/prometheus/main.go b/cmd/prometheus/main.go index 2a96a6ba7..0e05b843d 100644 --- a/cmd/prometheus/main.go +++ b/cmd/prometheus/main.go @@ -22,7 +22,6 @@ import ( "math/bits" "net" "net/http" - _ "net/http/pprof" // Comment this line to disable pprof endpoint. 
"net/url" "os" "os/signal" From ada8a6ef10c37ec0ea37b2e0c21e4ec2187a6fa8 Mon Sep 17 00:00:00 2001 From: Oleg Zaytsev Date: Fri, 27 Sep 2024 10:14:39 +0200 Subject: [PATCH 14/74] Add some more tests for MemPostings_Delete Signed-off-by: Oleg Zaytsev --- tsdb/index/postings_test.go | 86 +++++++++++++++++++++++++------------ 1 file changed, 59 insertions(+), 27 deletions(-) diff --git a/tsdb/index/postings_test.go b/tsdb/index/postings_test.go index 1802c9e89..b41fb54e6 100644 --- a/tsdb/index/postings_test.go +++ b/tsdb/index/postings_test.go @@ -973,37 +973,69 @@ func TestMemPostingsStats(t *testing.T) { } func TestMemPostings_Delete(t *testing.T) { - p := NewMemPostings() - p.Add(1, labels.FromStrings("lbl1", "a")) - p.Add(2, labels.FromStrings("lbl1", "b")) - p.Add(3, labels.FromStrings("lbl2", "a")) + t.Run("some postings", func(t *testing.T) { + p := NewMemPostings() + p.Add(1, labels.FromStrings("lbl1", "a")) + p.Add(2, labels.FromStrings("lbl1", "b")) + p.Add(3, labels.FromStrings("lbl2", "a")) - before := p.Get(allPostingsKey.Name, allPostingsKey.Value) - deletedRefs := map[storage.SeriesRef]struct{}{ - 2: {}, - } - affectedLabels := map[labels.Label]struct{}{ - {Name: "lbl1", Value: "b"}: {}, - } - p.Delete(deletedRefs, affectedLabels) - after := p.Get(allPostingsKey.Name, allPostingsKey.Value) + before := p.Get(allPostingsKey.Name, allPostingsKey.Value) + deletedRefs := map[storage.SeriesRef]struct{}{ + 2: {}, + } + affectedLabels := map[labels.Label]struct{}{ + {Name: "lbl1", Value: "b"}: {}, + } + p.Delete(deletedRefs, affectedLabels) + after := p.Get(allPostingsKey.Name, allPostingsKey.Value) - // Make sure postings gotten before the delete have the old data when - // iterated over. - expanded, err := ExpandPostings(before) - require.NoError(t, err) - require.Equal(t, []storage.SeriesRef{1, 2, 3}, expanded) + // Make sure postings gotten before the delete have the old data when + // iterated over. 
+ expanded, err := ExpandPostings(before) + require.NoError(t, err) + require.Equal(t, []storage.SeriesRef{1, 2, 3}, expanded) - // Make sure postings gotten after the delete have the new data when - // iterated over. - expanded, err = ExpandPostings(after) - require.NoError(t, err) - require.Equal(t, []storage.SeriesRef{1, 3}, expanded) + // Make sure postings gotten after the delete have the new data when + // iterated over. + expanded, err = ExpandPostings(after) + require.NoError(t, err) + require.Equal(t, []storage.SeriesRef{1, 3}, expanded) - deleted := p.Get("lbl1", "b") - expanded, err = ExpandPostings(deleted) - require.NoError(t, err) - require.Empty(t, expanded, "expected empty postings, got %v", expanded) + deleted := p.Get("lbl1", "b") + expanded, err = ExpandPostings(deleted) + require.NoError(t, err) + require.Empty(t, expanded, "expected empty postings, got %v", expanded) + }) + + t.Run("all postings", func(t *testing.T) { + p := NewMemPostings() + p.Add(1, labels.FromStrings("lbl1", "a")) + p.Add(2, labels.FromStrings("lbl1", "b")) + p.Add(3, labels.FromStrings("lbl2", "a")) + + deletedRefs := map[storage.SeriesRef]struct{}{1: {}, 2: {}, 3: {}} + affectedLabels := map[labels.Label]struct{}{ + {Name: "lbl1", Value: "a"}: {}, + {Name: "lbl1", Value: "b"}: {}, + {Name: "lbl1", Value: "c"}: {}, + } + p.Delete(deletedRefs, affectedLabels) + after := p.Get(allPostingsKey.Name, allPostingsKey.Value) + expanded, err := ExpandPostings(after) + require.NoError(t, err) + require.Empty(t, expanded) + }) + + t.Run("nothing on empty mempostings", func(t *testing.T) { + p := NewMemPostings() + deletedRefs := map[storage.SeriesRef]struct{}{} + affectedLabels := map[labels.Label]struct{}{} + p.Delete(deletedRefs, affectedLabels) + after := p.Get(allPostingsKey.Name, allPostingsKey.Value) + expanded, err := ExpandPostings(after) + require.NoError(t, err) + require.Empty(t, expanded) + }) } // BenchmarkMemPostings_Delete is quite heavy, so consider running it with 
From b8e5b7cda9f9c589a3d4d9cb356a7fcf77d9cf17 Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Fri, 27 Sep 2024 11:20:45 +0100 Subject: [PATCH 15/74] [REFACTOR] PromQL: remove label_join and label_replace stubs These functions operate on whole series, not on samples, so they do not fit into the table of functions that return a Vector. Remove the stub entries that were left to help downstream users of the code identify what changed. We cannot remove the entries from the `FunctionCalls` map without breaking `TestFunctionList`, so put some nils in to keep it happy. Signed-off-by: Bryan Boreham --- promql/functions.go | 14 ++------------ 1 file changed, 2 insertions(+), 12 deletions(-) diff --git a/promql/functions.go b/promql/functions.go index c4a7ee4a4..4333cb5ce 100644 --- a/promql/functions.go +++ b/promql/functions.go @@ -1514,11 +1514,6 @@ func (ev *evaluator) evalLabelReplace(ctx context.Context, args parser.Expressio return matrix, ws } -// === label_replace(Vector parser.ValueTypeVector, dst_label, replacement, src_labelname, regex parser.ValueTypeString) (Vector, Annotations) === -func funcLabelReplace(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - panic("funcLabelReplace wrong implementation called") -} - // === Vector(s Scalar) (Vector, Annotations) === func funcVector(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return append(enh.Out, @@ -1570,11 +1565,6 @@ func (ev *evaluator) evalLabelJoin(ctx context.Context, args parser.Expressions) return matrix, ws } -// === label_join(vector model.ValVector, dest_labelname, separator, src_labelname...) (Vector, Annotations) === -func funcLabelJoin(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - panic("funcLabelReplace wrong implementation called") -} - // Common code for date related functions. 
func dateWrapper(vals []parser.Value, enh *EvalNodeHelper, f func(time.Time) float64) Vector { if len(vals) == 0 { @@ -1696,8 +1686,8 @@ var FunctionCalls = map[string]FunctionCall{ "idelta": funcIdelta, "increase": funcIncrease, "irate": funcIrate, - "label_replace": funcLabelReplace, - "label_join": funcLabelJoin, + "label_replace": nil, // evalLabelReplace not called via this map. + "label_join": nil, // evalLabelJoin not called via this map. "ln": funcLn, "log10": funcLog10, "log2": funcLog2, From 6cde0096e21b5852d9224f4f83ac630394038440 Mon Sep 17 00:00:00 2001 From: Julien Date: Fri, 20 Sep 2024 13:29:34 +0200 Subject: [PATCH 16/74] Add notifications to the web UI when configuration reload fails. This commit introduces a new `/api/v1/notifications/live` endpoint that utilizes Server-Sent Events (SSE) to stream notifications to the web UI. This is used to display alerts such as when a configuration reload has failed. I opted for SSE over WebSockets because SSE is simpler to implement and more robust for our use case. Since we only need one-way communication from the server to the client, SSE fits perfectly without the overhead of establishing and maintaining a two-way WebSocket connection. When the SSE connection fails, we go back to a classic /api/v1/notifications API endpoint. This commit also contains the required UI changes for the new Mantine UI. 
Signed-off-by: Julien --- cmd/prometheus/main.go | 29 ++- web/api/notifications.go | 176 ++++++++++++++++ web/api/notifications_test.go | 192 ++++++++++++++++++ web/api/v1/api.go | 105 +++++++--- web/api/v1/errors_test.go | 2 + web/ui/mantine-ui/src/App.tsx | 77 +++---- web/ui/mantine-ui/src/api/api.ts | 3 + .../src/api/responseTypes/notifications.ts | 8 + .../src/components/NotificationsIcon.tsx | 62 ++++++ .../src/components/NotificationsProvider.tsx | 61 ++++++ .../mantine-ui/src/state/useNotifications.ts | 17 ++ web/web.go | 5 + 12 files changed, 668 insertions(+), 69 deletions(-) create mode 100644 web/api/notifications.go create mode 100644 web/api/notifications_test.go create mode 100644 web/ui/mantine-ui/src/api/responseTypes/notifications.ts create mode 100644 web/ui/mantine-ui/src/components/NotificationsIcon.tsx create mode 100644 web/ui/mantine-ui/src/components/NotificationsProvider.tsx create mode 100644 web/ui/mantine-ui/src/state/useNotifications.ts diff --git a/cmd/prometheus/main.go b/cmd/prometheus/main.go index e7fd82e6f..176ebd5b5 100644 --- a/cmd/prometheus/main.go +++ b/cmd/prometheus/main.go @@ -79,6 +79,7 @@ import ( "github.com/prometheus/prometheus/util/logging" prom_runtime "github.com/prometheus/prometheus/util/runtime" "github.com/prometheus/prometheus/web" + "github.com/prometheus/prometheus/web/api" ) var ( @@ -277,13 +278,17 @@ func main() { ) } + notifs := api.NewNotifications(prometheus.DefaultRegisterer) + cfg := flagConfig{ notifier: notifier.Options{ Registerer: prometheus.DefaultRegisterer, }, web: web.Options{ - Registerer: prometheus.DefaultRegisterer, - Gatherer: prometheus.DefaultGatherer, + Registerer: prometheus.DefaultRegisterer, + Gatherer: prometheus.DefaultGatherer, + NotificationsSub: notifs.Sub, + NotificationsGetter: notifs.Get, }, promlogConfig: promlog.Config{}, } @@ -1082,6 +1087,14 @@ func main() { } } + callback := func(success bool) { + if success { + notifs.DeleteNotification(api.ConfigurationUnsuccessful) 
+ return + } + notifs.AddNotification(api.ConfigurationUnsuccessful) + } + g.Add( func() error { <-reloadReady.C @@ -1089,7 +1102,7 @@ func main() { for { select { case <-hup: - if err := reloadConfig(cfg.configFile, cfg.enableExpandExternalLabels, cfg.tsdb.EnableExemplarStorage, logger, noStepSubqueryInterval, reloaders...); err != nil { + if err := reloadConfig(cfg.configFile, cfg.enableExpandExternalLabels, cfg.tsdb.EnableExemplarStorage, logger, noStepSubqueryInterval, callback, reloaders...); err != nil { level.Error(logger).Log("msg", "Error reloading config", "err", err) } else if cfg.enableAutoReload { if currentChecksum, err := config.GenerateChecksum(cfg.configFile); err == nil { @@ -1099,7 +1112,7 @@ func main() { } } case rc := <-webHandler.Reload(): - if err := reloadConfig(cfg.configFile, cfg.enableExpandExternalLabels, cfg.tsdb.EnableExemplarStorage, logger, noStepSubqueryInterval, reloaders...); err != nil { + if err := reloadConfig(cfg.configFile, cfg.enableExpandExternalLabels, cfg.tsdb.EnableExemplarStorage, logger, noStepSubqueryInterval, callback, reloaders...); err != nil { level.Error(logger).Log("msg", "Error reloading config", "err", err) rc <- err } else { @@ -1124,7 +1137,7 @@ func main() { } level.Info(logger).Log("msg", "Configuration file change detected, reloading the configuration.") - if err := reloadConfig(cfg.configFile, cfg.enableExpandExternalLabels, cfg.tsdb.EnableExemplarStorage, logger, noStepSubqueryInterval, reloaders...); err != nil { + if err := reloadConfig(cfg.configFile, cfg.enableExpandExternalLabels, cfg.tsdb.EnableExemplarStorage, logger, noStepSubqueryInterval, callback, reloaders...); err != nil { level.Error(logger).Log("msg", "Error reloading config", "err", err) } else { checksum = currentChecksum @@ -1154,7 +1167,7 @@ func main() { return nil } - if err := reloadConfig(cfg.configFile, cfg.enableExpandExternalLabels, cfg.tsdb.EnableExemplarStorage, logger, noStepSubqueryInterval, reloaders...); err != nil { + 
if err := reloadConfig(cfg.configFile, cfg.enableExpandExternalLabels, cfg.tsdb.EnableExemplarStorage, logger, noStepSubqueryInterval, func(bool) {}, reloaders...); err != nil { return fmt.Errorf("error loading config from %q: %w", cfg.configFile, err) } @@ -1380,7 +1393,7 @@ type reloader struct { reloader func(*config.Config) error } -func reloadConfig(filename string, expandExternalLabels, enableExemplarStorage bool, logger log.Logger, noStepSuqueryInterval *safePromQLNoStepSubqueryInterval, rls ...reloader) (err error) { +func reloadConfig(filename string, expandExternalLabels, enableExemplarStorage bool, logger log.Logger, noStepSuqueryInterval *safePromQLNoStepSubqueryInterval, callback func(bool), rls ...reloader) (err error) { start := time.Now() timings := []interface{}{} level.Info(logger).Log("msg", "Loading configuration file", "filename", filename) @@ -1389,8 +1402,10 @@ func reloadConfig(filename string, expandExternalLabels, enableExemplarStorage b if err == nil { configSuccess.Set(1) configSuccessTime.SetToCurrentTime() + callback(true) } else { configSuccess.Set(0) + callback(false) } }() diff --git a/web/api/notifications.go b/web/api/notifications.go new file mode 100644 index 000000000..47f29f6eb --- /dev/null +++ b/web/api/notifications.go @@ -0,0 +1,176 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package api + +import ( + "sync" + "time" + + "github.com/prometheus/client_golang/prometheus" +) + +const ( + ConfigurationUnsuccessful = "Configuration reload has failed." +) + +// Notification represents an individual notification message. +type Notification struct { + Text string `json:"text"` + Date time.Time `json:"date"` + Active bool `json:"active"` +} + +// Notifications stores a list of Notification objects. +// It also manages live subscribers that receive notifications via channels. +type Notifications struct { + mu sync.Mutex + notifications []Notification + subscribers map[chan Notification]struct{} // Active subscribers. + + subscriberGauge prometheus.Gauge + notificationsSent prometheus.Counter + notificationsDropped prometheus.Counter +} + +// NewNotifications creates a new Notifications instance. +func NewNotifications(reg prometheus.Registerer) *Notifications { + n := &Notifications{ + subscribers: make(map[chan Notification]struct{}), + subscriberGauge: prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: "prometheus", + Subsystem: "api", + Name: "notification_active_subscribers", + Help: "The current number of active notification subscribers.", + }), + notificationsSent: prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: "prometheus", + Subsystem: "api", + Name: "notification_updates_sent_total", + Help: "Total number of notification updates sent.", + }), + notificationsDropped: prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: "prometheus", + Subsystem: "api", + Name: "notification_updates_dropped_total", + Help: "Total number of notification updates dropped.", + }), + } + + if reg != nil { + reg.MustRegister(n.subscriberGauge, n.notificationsSent, n.notificationsDropped) + } + + return n +} + +// AddNotification adds a new notification or updates the timestamp if it already exists. 
+func (n *Notifications) AddNotification(text string) { + n.mu.Lock() + defer n.mu.Unlock() + + for i, notification := range n.notifications { + if notification.Text == text { + n.notifications[i].Date = time.Now() + + n.notifySubscribers(n.notifications[i]) + return + } + } + + newNotification := Notification{ + Text: text, + Date: time.Now(), + Active: true, + } + n.notifications = append(n.notifications, newNotification) + + n.notifySubscribers(newNotification) +} + +// notifySubscribers sends a notification to all active subscribers. +func (n *Notifications) notifySubscribers(notification Notification) { + for sub := range n.subscribers { + // Non-blocking send to avoid subscriber blocking issues. + n.notificationsSent.Inc() + select { + case sub <- notification: + // Notification sent to the subscriber. + default: + // Drop the notification if the subscriber's channel is full. + n.notificationsDropped.Inc() + } + } +} + +// DeleteNotification removes the first notification that matches the provided text. +// The deleted notification is sent to subscribers with Active: false before being removed. +func (n *Notifications) DeleteNotification(text string) { + n.mu.Lock() + defer n.mu.Unlock() + + // Iterate through the notifications to find the matching text. + for i, notification := range n.notifications { + if notification.Text == text { + // Mark the notification as inactive and notify subscribers. + notification.Active = false + n.notifySubscribers(notification) + + // Remove the notification from the list. + n.notifications = append(n.notifications[:i], n.notifications[i+1:]...) + return + } + } +} + +// Get returns a copy of the list of notifications for safe access outside the struct. +func (n *Notifications) Get() []Notification { + n.mu.Lock() + defer n.mu.Unlock() + + // Return a copy of the notifications slice to avoid modifying the original slice outside. 
+ notificationsCopy := make([]Notification, len(n.notifications)) + copy(notificationsCopy, n.notifications) + return notificationsCopy +} + +// Sub allows a client to subscribe to live notifications. +// It returns a channel where the subscriber will receive notifications and a function to unsubscribe. +// Each subscriber has its own goroutine to handle notifications and prevent blocking. +func (n *Notifications) Sub() (<-chan Notification, func()) { + ch := make(chan Notification, 10) // Buffered channel to prevent blocking. + + n.mu.Lock() + // Add the new subscriber to the list. + n.subscribers[ch] = struct{}{} + n.subscriberGauge.Set(float64(len(n.subscribers))) + + // Send all current notifications to the new subscriber. + for _, notification := range n.notifications { + ch <- notification + } + n.mu.Unlock() + + // Unsubscribe function to remove the channel from subscribers. + unsubscribe := func() { + n.mu.Lock() + defer n.mu.Unlock() + + // Close the channel and remove it from the subscribers map. + close(ch) + delete(n.subscribers, ch) + n.subscriberGauge.Set(float64(len(n.subscribers))) + } + + return ch, unsubscribe +} diff --git a/web/api/notifications_test.go b/web/api/notifications_test.go new file mode 100644 index 000000000..7aa596163 --- /dev/null +++ b/web/api/notifications_test.go @@ -0,0 +1,192 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package api + +import ( + "sync" + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +// TestNotificationLifecycle tests adding, modifying, and deleting notifications. +func TestNotificationLifecycle(t *testing.T) { + notifs := NewNotifications(nil) + + // Add a notification. + notifs.AddNotification("Test Notification 1") + + // Check if the notification was added. + notifications := notifs.Get() + require.Len(t, notifications, 1, "Expected 1 notification after addition.") + require.Equal(t, "Test Notification 1", notifications[0].Text, "Notification text mismatch.") + require.True(t, notifications[0].Active, "Expected notification to be active.") + + // Modify the notification. + notifs.AddNotification("Test Notification 1") + notifications = notifs.Get() + require.Len(t, notifications, 1, "Expected 1 notification after modification.") + + // Delete the notification. + notifs.DeleteNotification("Test Notification 1") + notifications = notifs.Get() + require.Empty(t, notifications, "Expected no notifications after deletion.") +} + +// TestSubscriberReceivesNotifications tests that a subscriber receives notifications, including modifications and deletions. +func TestSubscriberReceivesNotifications(t *testing.T) { + notifs := NewNotifications(nil) + + // Subscribe to notifications. + sub, unsubscribe := notifs.Sub() + + var wg sync.WaitGroup + wg.Add(1) + + receivedNotifications := make([]Notification, 0) + + // Goroutine to listen for notifications. + go func() { + defer wg.Done() + for notification := range sub { + receivedNotifications = append(receivedNotifications, notification) + } + }() + + // Add notifications. + notifs.AddNotification("Test Notification 1") + notifs.AddNotification("Test Notification 2") + + // Modify a notification. + notifs.AddNotification("Test Notification 1") + + // Delete a notification. + notifs.DeleteNotification("Test Notification 2") + + // Wait for notifications to propagate. 
+ time.Sleep(100 * time.Millisecond) + + unsubscribe() + wg.Wait() // Wait for the subscriber goroutine to finish. + + // Verify that we received the expected number of notifications. + require.Len(t, receivedNotifications, 4, "Expected 4 notifications (2 active, 1 modified, 1 deleted).") + + // Check the content and state of received notifications. + expected := []struct { + Text string + Active bool + }{ + {"Test Notification 1", true}, + {"Test Notification 2", true}, + {"Test Notification 1", true}, + {"Test Notification 2", false}, + } + + for i, n := range receivedNotifications { + require.Equal(t, expected[i].Text, n.Text, "Notification text mismatch at index %d.", i) + require.Equal(t, expected[i].Active, n.Active, "Notification active state mismatch at index %d.", i) + } +} + +// TestMultipleSubscribers tests that multiple subscribers receive notifications independently. +func TestMultipleSubscribers(t *testing.T) { + notifs := NewNotifications(nil) + + // Subscribe two subscribers to notifications. + sub1, unsubscribe1 := notifs.Sub() + + sub2, unsubscribe2 := notifs.Sub() + + var wg sync.WaitGroup + wg.Add(2) + + receivedSub1 := make([]Notification, 0) + receivedSub2 := make([]Notification, 0) + + // Goroutine for subscriber 1. + go func() { + defer wg.Done() + for notification := range sub1 { + receivedSub1 = append(receivedSub1, notification) + } + }() + + // Goroutine for subscriber 2. + go func() { + defer wg.Done() + for notification := range sub2 { + receivedSub2 = append(receivedSub2, notification) + } + }() + + // Add and delete notifications. + notifs.AddNotification("Test Notification 1") + notifs.DeleteNotification("Test Notification 1") + + // Wait for notifications to propagate. + time.Sleep(100 * time.Millisecond) + + // Unsubscribe both. + unsubscribe1() + unsubscribe2() + + wg.Wait() + + // Both subscribers should have received the same 2 notifications. 
+ require.Len(t, receivedSub1, 2, "Expected 2 notifications for subscriber 1.") + require.Len(t, receivedSub2, 2, "Expected 2 notifications for subscriber 2.") + + // Verify that both subscribers received the same notifications. + for i := 0; i < 2; i++ { + require.Equal(t, receivedSub1[i], receivedSub2[i], "Subscriber notification mismatch at index %d.", i) + } +} + +// TestUnsubscribe tests that unsubscribing prevents further notifications from being received. +func TestUnsubscribe(t *testing.T) { + notifs := NewNotifications(nil) + + // Subscribe to notifications. + sub, unsubscribe := notifs.Sub() + + var wg sync.WaitGroup + wg.Add(1) + + receivedNotifications := make([]Notification, 0) + + // Goroutine to listen for notifications. + go func() { + defer wg.Done() + for notification := range sub { + receivedNotifications = append(receivedNotifications, notification) + } + }() + + // Add a notification and then unsubscribe. + notifs.AddNotification("Test Notification 1") + time.Sleep(100 * time.Millisecond) // Allow time for notification delivery. + unsubscribe() // Unsubscribe. + + // Add another notification after unsubscribing. + notifs.AddNotification("Test Notification 2") + + // Wait for the subscriber goroutine to finish. + wg.Wait() + + // Only the first notification should have been received. 
+ require.Len(t, receivedNotifications, 1, "Expected 1 notification before unsubscribe.") + require.Equal(t, "Test Notification 1", receivedNotifications[0].Text, "Unexpected notification text.") +} diff --git a/web/api/v1/api.go b/web/api/v1/api.go index 0ec8467fa..5eadbdbe7 100644 --- a/web/api/v1/api.go +++ b/web/api/v1/api.go @@ -15,6 +15,7 @@ package v1 import ( "context" + "encoding/json" "errors" "fmt" "math" @@ -54,6 +55,7 @@ import ( "github.com/prometheus/prometheus/util/annotations" "github.com/prometheus/prometheus/util/httputil" "github.com/prometheus/prometheus/util/stats" + "github.com/prometheus/prometheus/web/api" ) type status string @@ -202,16 +204,18 @@ type API struct { ready func(http.HandlerFunc) http.HandlerFunc globalURLOptions GlobalURLOptions - db TSDBAdminStats - dbDir string - enableAdmin bool - logger log.Logger - CORSOrigin *regexp.Regexp - buildInfo *PrometheusVersion - runtimeInfo func() (RuntimeInfo, error) - gatherer prometheus.Gatherer - isAgent bool - statsRenderer StatsRenderer + db TSDBAdminStats + dbDir string + enableAdmin bool + logger log.Logger + CORSOrigin *regexp.Regexp + buildInfo *PrometheusVersion + runtimeInfo func() (RuntimeInfo, error) + gatherer prometheus.Gatherer + isAgent bool + statsRenderer StatsRenderer + notificationsGetter func() []api.Notification + notificationsSub func() (<-chan api.Notification, func()) remoteWriteHandler http.Handler remoteReadHandler http.Handler @@ -245,6 +249,8 @@ func NewAPI( corsOrigin *regexp.Regexp, runtimeInfo func() (RuntimeInfo, error), buildInfo *PrometheusVersion, + notificationsGetter func() []api.Notification, + notificationsSub func() (<-chan api.Notification, func()), gatherer prometheus.Gatherer, registerer prometheus.Registerer, statsRenderer StatsRenderer, @@ -261,22 +267,24 @@ func NewAPI( targetRetriever: tr, alertmanagerRetriever: ar, - now: time.Now, - config: configFunc, - flagsMap: flagsMap, - ready: readyFunc, - globalURLOptions: globalURLOptions, - db: db, 
- dbDir: dbDir, - enableAdmin: enableAdmin, - rulesRetriever: rr, - logger: logger, - CORSOrigin: corsOrigin, - runtimeInfo: runtimeInfo, - buildInfo: buildInfo, - gatherer: gatherer, - isAgent: isAgent, - statsRenderer: DefaultStatsRenderer, + now: time.Now, + config: configFunc, + flagsMap: flagsMap, + ready: readyFunc, + globalURLOptions: globalURLOptions, + db: db, + dbDir: dbDir, + enableAdmin: enableAdmin, + rulesRetriever: rr, + logger: logger, + CORSOrigin: corsOrigin, + runtimeInfo: runtimeInfo, + buildInfo: buildInfo, + gatherer: gatherer, + isAgent: isAgent, + statsRenderer: DefaultStatsRenderer, + notificationsGetter: notificationsGetter, + notificationsSub: notificationsSub, remoteReadHandler: remote.NewReadHandler(logger, registerer, q, configFunc, remoteReadSampleLimit, remoteReadConcurrencyLimit, remoteReadMaxBytesInFrame), } @@ -390,6 +398,8 @@ func (api *API) Register(r *route.Router) { r.Get("/status/flags", wrap(api.serveFlags)) r.Get("/status/tsdb", wrapAgent(api.serveTSDBStatus)) r.Get("/status/walreplay", api.serveWALReplayStatus) + r.Get("/notifications", api.notifications) + r.Get("/notifications/live", api.notificationsSSE) r.Post("/read", api.ready(api.remoteRead)) r.Post("/write", api.ready(api.remoteWrite)) r.Post("/otlp/v1/metrics", api.ready(api.otlpWrite)) @@ -1668,6 +1678,49 @@ func (api *API) serveWALReplayStatus(w http.ResponseWriter, r *http.Request) { }, nil, "") } +func (api *API) notifications(w http.ResponseWriter, r *http.Request) { + httputil.SetCORS(w, api.CORSOrigin, r) + api.respond(w, r, api.notificationsGetter(), nil, "") +} + +func (api *API) notificationsSSE(w http.ResponseWriter, r *http.Request) { + httputil.SetCORS(w, api.CORSOrigin, r) + w.Header().Set("Content-Type", "text/event-stream") + w.Header().Set("Cache-Control", "no-cache") + w.Header().Set("Connection", "keep-alive") + + // Subscribe to notifications. 
+ notifications, unsubscribe := api.notificationsSub() + defer unsubscribe() + + // Set up a flusher to push the response to the client. + flusher, ok := w.(http.Flusher) + if !ok { + http.Error(w, "Streaming unsupported", http.StatusInternalServerError) + return + } + + for { + select { + case notification := <-notifications: + // Marshal the notification to JSON. + jsonData, err := json.Marshal(notification) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + continue + } + + // Write the event data in SSE format with JSON content. + fmt.Fprintf(w, "data: %s\n\n", jsonData) + + // Flush the response to ensure the data is sent immediately. + flusher.Flush() + case <-r.Context().Done(): + return + } + } +} + func (api *API) remoteRead(w http.ResponseWriter, r *http.Request) { // This is only really for tests - this will never be nil IRL. if api.remoteReadHandler != nil { diff --git a/web/api/v1/errors_test.go b/web/api/v1/errors_test.go index 7e1fc09d8..db16b9fb3 100644 --- a/web/api/v1/errors_test.go +++ b/web/api/v1/errors_test.go @@ -134,6 +134,8 @@ func createPrometheusAPI(t *testing.T, q storage.SampleAndChunkQueryable) *route regexp.MustCompile(".*"), func() (RuntimeInfo, error) { return RuntimeInfo{}, errors.New("not implemented") }, &PrometheusVersion{}, + nil, + nil, prometheus.DefaultGatherer, nil, nil, diff --git a/web/ui/mantine-ui/src/App.tsx b/web/ui/mantine-ui/src/App.tsx index aa5eb3714..3e3466825 100644 --- a/web/ui/mantine-ui/src/App.tsx +++ b/web/ui/mantine-ui/src/App.tsx @@ -64,6 +64,8 @@ import { useAppDispatch } from "./state/hooks"; import { updateSettings, useSettings } from "./state/settingsSlice"; import SettingsMenu from "./components/SettingsMenu"; import ReadinessWrapper from "./components/ReadinessWrapper"; +import NotificationsProvider from "./components/NotificationsProvider"; +import NotificationsIcon from "./components/NotificationsIcon"; import { QueryParamProvider } from "use-query-params"; import { 
ReactRouter6Adapter } from "use-query-params/adapters/react-router-6"; import ServiceDiscoveryPage from "./pages/service-discovery/ServiceDiscoveryPage"; @@ -314,6 +316,7 @@ function App() { const navActionIcons = ( <> + - - - - - - - - Prometheus{agentMode && " Agent"} + + + + + + + + + Prometheus{agentMode && " Agent"} + + + + {navLinks} - - - {navLinks} + + + {navActionIcons} - - {navActionIcons} - + - - - + - - {navLinks} - - {navActionIcons} - - + + {navLinks} + + {navActionIcons} + + + diff --git a/web/ui/mantine-ui/src/api/api.ts b/web/ui/mantine-ui/src/api/api.ts index d7446d689..f1dd2b8c0 100644 --- a/web/ui/mantine-ui/src/api/api.ts +++ b/web/ui/mantine-ui/src/api/api.ts @@ -93,6 +93,7 @@ type QueryOptions = { path: string; params?: Record; enabled?: boolean; + refetchInterval?: false | number; recordResponseTime?: (time: number) => void; }; @@ -102,6 +103,7 @@ export const useAPIQuery = ({ params, enabled, recordResponseTime, + refetchInterval, }: QueryOptions) => { const { pathPrefix } = useSettings(); @@ -109,6 +111,7 @@ export const useAPIQuery = ({ queryKey: key !== undefined ? 
key : [path, params], retry: false, refetchOnWindowFocus: false, + refetchInterval: refetchInterval, gcTime: 0, enabled, queryFn: createQueryFn({ pathPrefix, path, params, recordResponseTime }), diff --git a/web/ui/mantine-ui/src/api/responseTypes/notifications.ts b/web/ui/mantine-ui/src/api/responseTypes/notifications.ts new file mode 100644 index 000000000..d6ebf68d4 --- /dev/null +++ b/web/ui/mantine-ui/src/api/responseTypes/notifications.ts @@ -0,0 +1,8 @@ +export interface Notification { + text: string; + date: string; + active: boolean; + modified: boolean; +} + +export type NotificationsResult = Notification[]; diff --git a/web/ui/mantine-ui/src/components/NotificationsIcon.tsx b/web/ui/mantine-ui/src/components/NotificationsIcon.tsx new file mode 100644 index 000000000..5ab28b037 --- /dev/null +++ b/web/ui/mantine-ui/src/components/NotificationsIcon.tsx @@ -0,0 +1,62 @@ +import { ActionIcon, Indicator, Popover, Card, Text, Stack, ScrollArea, Group } from "@mantine/core"; +import { IconBell, IconAlertTriangle, IconNetworkOff } from "@tabler/icons-react"; +import { useNotifications } from '../state/useNotifications'; +import { actionIconStyle } from "../styles"; +import { useSettings } from '../state/settingsSlice'; +import { formatTimestamp } from "../lib/formatTime"; + +const NotificationsIcon = () => { + const { notifications, isConnectionError } = useNotifications(); + const { useLocalTime } = useSettings(); + + return ( + (notifications.length === 0 && !isConnectionError) ? null : ( + + + + + + + + + + + Notifications + + { isConnectionError ? ( + + + + + Real-time notifications interrupted. + Please refresh the page or check your connection. + + + + ) : notifications.length === 0 ? 
( + No notifications + ) : (notifications.map((notification, index) => ( + + + + + {notification.text} + {formatTimestamp(new Date(notification.date).valueOf() / 1000, useLocalTime)} + + + + )))} + + + + + + ) + ); +}; + +export default NotificationsIcon; diff --git a/web/ui/mantine-ui/src/components/NotificationsProvider.tsx b/web/ui/mantine-ui/src/components/NotificationsProvider.tsx new file mode 100644 index 000000000..73de54131 --- /dev/null +++ b/web/ui/mantine-ui/src/components/NotificationsProvider.tsx @@ -0,0 +1,61 @@ +import React, { useEffect, useState } from 'react'; +import { useSettings } from '../state/settingsSlice'; +import { NotificationsContext } from '../state/useNotifications'; +import { Notification, NotificationsResult } from "../api/responseTypes/notifications"; +import { useAPIQuery } from '../api/api'; + +export const NotificationsProvider: React.FC<{ children: React.ReactNode }> = ({ children }) => { + const { pathPrefix } = useSettings(); + const [notifications, setNotifications] = useState([]); + const [isConnectionError, setIsConnectionError] = useState(false); + const [shouldFetchFromAPI, setShouldFetchFromAPI] = useState(false); + + const { data, isError } = useAPIQuery({ + path: '/notifications', + enabled: shouldFetchFromAPI, + refetchInterval: 10000, + }); + + useEffect(() => { + if (data && data.data) { + setNotifications(data.data); + } + setIsConnectionError(isError); + }, [data, isError]); + + useEffect(() => { + const eventSource = new EventSource(`${pathPrefix}/api/v1/notifications/live`); + + eventSource.onmessage = (event) => { + const notification: Notification = JSON.parse(event.data); + + setNotifications((prev: Notification[]) => { + const updatedNotifications = [...prev.filter((n: Notification) => n.text !== notification.text)]; + + if (notification.active) { + updatedNotifications.push(notification); + } + + return updatedNotifications; + }); + }; + + eventSource.onerror = () => { + eventSource.close(); + 
setIsConnectionError(true); + setShouldFetchFromAPI(true); + }; + + return () => { + eventSource.close(); + }; + }, [pathPrefix]); + + return ( + + {children} + + ); +}; + +export default NotificationsProvider; diff --git a/web/ui/mantine-ui/src/state/useNotifications.ts b/web/ui/mantine-ui/src/state/useNotifications.ts new file mode 100644 index 000000000..40a3f0920 --- /dev/null +++ b/web/ui/mantine-ui/src/state/useNotifications.ts @@ -0,0 +1,17 @@ +import { createContext, useContext } from 'react'; +import { Notification } from "../api/responseTypes/notifications"; + +export type NotificationsContextType = { + notifications: Notification[]; + isConnectionError: boolean; +}; + +const defaultContextValue: NotificationsContextType = { + notifications: [], + isConnectionError: false, +}; + +export const NotificationsContext = createContext(defaultContextValue); + +// Custom hook to access notifications context +export const useNotifications = () => useContext(NotificationsContext); diff --git a/web/web.go b/web/web.go index 6b0d9cd18..87e4164c5 100644 --- a/web/web.go +++ b/web/web.go @@ -59,6 +59,7 @@ import ( "github.com/prometheus/prometheus/template" "github.com/prometheus/prometheus/util/httputil" "github.com/prometheus/prometheus/util/netconnlimit" + "github.com/prometheus/prometheus/web/api" api_v1 "github.com/prometheus/prometheus/web/api/v1" "github.com/prometheus/prometheus/web/ui" ) @@ -266,6 +267,8 @@ type Options struct { RuleManager *rules.Manager Notifier *notifier.Manager Version *PrometheusVersion + NotificationsGetter func() []api.Notification + NotificationsSub func() (<-chan api.Notification, func()) Flags map[string]string ListenAddresses []string @@ -376,6 +379,8 @@ func New(logger log.Logger, o *Options) *Handler { h.options.CORSOrigin, h.runtimeInfo, h.versionInfo, + h.options.NotificationsGetter, + h.options.NotificationsSub, o.Gatherer, o.Registerer, nil, From f9bbad1148db0300977cd666a76a9d5609c884b6 Mon Sep 17 00:00:00 2001 From: Julien 
Date: Fri, 27 Sep 2024 13:51:50 +0200 Subject: [PATCH 17/74] Limit the number of SSE Subscribers to 16 by default Signed-off-by: Julien --- cmd/prometheus/main.go | 52 ++++++++++--------- docs/command-line/prometheus.md | 1 + web/api/notifications.go | 25 +++++---- web/api/notifications_test.go | 47 ++++++++++++++--- web/api/v1/api.go | 10 ++-- .../src/components/NotificationsProvider.tsx | 3 +- web/web.go | 2 +- 7 files changed, 94 insertions(+), 46 deletions(-) diff --git a/cmd/prometheus/main.go b/cmd/prometheus/main.go index dd068b86c..f39eba3c3 100644 --- a/cmd/prometheus/main.go +++ b/cmd/prometheus/main.go @@ -135,24 +135,25 @@ func agentOnlyFlag(app *kingpin.Application, name, help string) *kingpin.FlagCla type flagConfig struct { configFile string - agentStoragePath string - serverStoragePath string - notifier notifier.Options - forGracePeriod model.Duration - outageTolerance model.Duration - resendDelay model.Duration - maxConcurrentEvals int64 - web web.Options - scrape scrape.Options - tsdb tsdbOptions - agent agentOptions - lookbackDelta model.Duration - webTimeout model.Duration - queryTimeout model.Duration - queryConcurrency int - queryMaxSamples int - RemoteFlushDeadline model.Duration - nameEscapingScheme string + agentStoragePath string + serverStoragePath string + notifier notifier.Options + forGracePeriod model.Duration + outageTolerance model.Duration + resendDelay model.Duration + maxConcurrentEvals int64 + web web.Options + scrape scrape.Options + tsdb tsdbOptions + agent agentOptions + lookbackDelta model.Duration + webTimeout model.Duration + queryTimeout model.Duration + queryConcurrency int + queryMaxSamples int + RemoteFlushDeadline model.Duration + nameEscapingScheme string + maxNotificationsSubscribers int enableAutoReload bool autoReloadInterval model.Duration @@ -274,17 +275,13 @@ func main() { ) } - notifs := api.NewNotifications(prometheus.DefaultRegisterer) - cfg := flagConfig{ notifier: notifier.Options{ Registerer: 
prometheus.DefaultRegisterer, }, web: web.Options{ - Registerer: prometheus.DefaultRegisterer, - Gatherer: prometheus.DefaultGatherer, - NotificationsSub: notifs.Sub, - NotificationsGetter: notifs.Get, + Registerer: prometheus.DefaultRegisterer, + Gatherer: prometheus.DefaultGatherer, }, promlogConfig: promlog.Config{}, } @@ -319,6 +316,9 @@ func main() { a.Flag("web.max-connections", "Maximum number of simultaneous connections across all listeners."). Default("512").IntVar(&cfg.web.MaxConnections) + a.Flag("web.max-notifications-subscribers", "Limits the maximum number of subscribers that can concurrently receive live notifications. If the limit is reached, new subscription requests will be denied until existing connections close."). + Default("16").IntVar(&cfg.maxNotificationsSubscribers) + a.Flag("web.external-url", "The URL under which Prometheus is externally reachable (for example, if Prometheus is served via a reverse proxy). Used for generating relative and absolute links back to Prometheus itself. If the URL has a path portion, it will be used to prefix all HTTP endpoints served by Prometheus. If omitted, relevant URL components will be derived automatically."). PlaceHolder("").StringVar(&cfg.prometheusURL) @@ -500,6 +500,10 @@ func main() { logger := promlog.New(&cfg.promlogConfig) + notifs := api.NewNotifications(cfg.maxNotificationsSubscribers, prometheus.DefaultRegisterer) + cfg.web.NotificationsSub = notifs.Sub + cfg.web.NotificationsGetter = notifs.Get + if err := cfg.setFeatureListOptions(logger); err != nil { fmt.Fprintln(os.Stderr, fmt.Errorf("Error parsing feature list: %w", err)) os.Exit(1) diff --git a/docs/command-line/prometheus.md b/docs/command-line/prometheus.md index 7737b5021..eacb45ad0 100644 --- a/docs/command-line/prometheus.md +++ b/docs/command-line/prometheus.md @@ -21,6 +21,7 @@ The Prometheus monitoring server | --web.config.file | [EXPERIMENTAL] Path to configuration file that can enable TLS or authentication. 
| | | --web.read-timeout | Maximum duration before timing out read of the request, and closing idle connections. | `5m` | | --web.max-connections | Maximum number of simultaneous connections across all listeners. | `512` | +| --web.max-notifications-subscribers | Limits the maximum number of subscribers that can concurrently receive live notifications. If the limit is reached, new subscription requests will be denied until existing connections close. | `16` | | --web.external-url | The URL under which Prometheus is externally reachable (for example, if Prometheus is served via a reverse proxy). Used for generating relative and absolute links back to Prometheus itself. If the URL has a path portion, it will be used to prefix all HTTP endpoints served by Prometheus. If omitted, relevant URL components will be derived automatically. | | | --web.route-prefix | Prefix for the internal routes of web endpoints. Defaults to path of --web.external-url. | | | --web.user-assets | Path to static asset directory, available at /user. | | diff --git a/web/api/notifications.go b/web/api/notifications.go index 47f29f6eb..976f0b076 100644 --- a/web/api/notifications.go +++ b/web/api/notifications.go @@ -34,9 +34,10 @@ type Notification struct { // Notifications stores a list of Notification objects. // It also manages live subscribers that receive notifications via channels. type Notifications struct { - mu sync.Mutex - notifications []Notification - subscribers map[chan Notification]struct{} // Active subscribers. + mu sync.Mutex + notifications []Notification + subscribers map[chan Notification]struct{} // Active subscribers. + maxSubscribers int subscriberGauge prometheus.Gauge notificationsSent prometheus.Counter @@ -44,9 +45,10 @@ type Notifications struct { } // NewNotifications creates a new Notifications instance. 
-func NewNotifications(reg prometheus.Registerer) *Notifications { +func NewNotifications(maxSubscribers int, reg prometheus.Registerer) *Notifications { n := &Notifications{ - subscribers: make(map[chan Notification]struct{}), + subscribers: make(map[chan Notification]struct{}), + maxSubscribers: maxSubscribers, subscriberGauge: prometheus.NewGauge(prometheus.GaugeOpts{ Namespace: "prometheus", Subsystem: "api", @@ -147,10 +149,16 @@ func (n *Notifications) Get() []Notification { // Sub allows a client to subscribe to live notifications. // It returns a channel where the subscriber will receive notifications and a function to unsubscribe. // Each subscriber has its own goroutine to handle notifications and prevent blocking. -func (n *Notifications) Sub() (<-chan Notification, func()) { +func (n *Notifications) Sub() (<-chan Notification, func(), bool) { + n.mu.Lock() + defer n.mu.Unlock() + + if len(n.subscribers) >= n.maxSubscribers { + return nil, nil, false + } + ch := make(chan Notification, 10) // Buffered channel to prevent blocking. - n.mu.Lock() // Add the new subscriber to the list. n.subscribers[ch] = struct{}{} n.subscriberGauge.Set(float64(len(n.subscribers))) @@ -159,7 +167,6 @@ func (n *Notifications) Sub() (<-chan Notification, func()) { for _, notification := range n.notifications { ch <- notification } - n.mu.Unlock() // Unsubscribe function to remove the channel from subscribers. unsubscribe := func() { @@ -172,5 +179,5 @@ func (n *Notifications) Sub() (<-chan Notification, func()) { n.subscriberGauge.Set(float64(len(n.subscribers))) } - return ch, unsubscribe + return ch, unsubscribe, true } diff --git a/web/api/notifications_test.go b/web/api/notifications_test.go index 7aa596163..437ff1ec4 100644 --- a/web/api/notifications_test.go +++ b/web/api/notifications_test.go @@ -23,7 +23,7 @@ import ( // TestNotificationLifecycle tests adding, modifying, and deleting notifications. 
func TestNotificationLifecycle(t *testing.T) { - notifs := NewNotifications(nil) + notifs := NewNotifications(10, nil) // Add a notification. notifs.AddNotification("Test Notification 1") @@ -47,10 +47,11 @@ func TestNotificationLifecycle(t *testing.T) { // TestSubscriberReceivesNotifications tests that a subscriber receives notifications, including modifications and deletions. func TestSubscriberReceivesNotifications(t *testing.T) { - notifs := NewNotifications(nil) + notifs := NewNotifications(10, nil) // Subscribe to notifications. - sub, unsubscribe := notifs.Sub() + sub, unsubscribe, ok := notifs.Sub() + require.True(t, ok) var wg sync.WaitGroup wg.Add(1) @@ -103,12 +104,14 @@ func TestSubscriberReceivesNotifications(t *testing.T) { // TestMultipleSubscribers tests that multiple subscribers receive notifications independently. func TestMultipleSubscribers(t *testing.T) { - notifs := NewNotifications(nil) + notifs := NewNotifications(10, nil) // Subscribe two subscribers to notifications. - sub1, unsubscribe1 := notifs.Sub() + sub1, unsubscribe1, ok1 := notifs.Sub() + require.True(t, ok1) - sub2, unsubscribe2 := notifs.Sub() + sub2, unsubscribe2, ok2 := notifs.Sub() + require.True(t, ok2) var wg sync.WaitGroup wg.Add(2) @@ -157,10 +160,11 @@ func TestMultipleSubscribers(t *testing.T) { // TestUnsubscribe tests that unsubscribing prevents further notifications from being received. func TestUnsubscribe(t *testing.T) { - notifs := NewNotifications(nil) + notifs := NewNotifications(10, nil) // Subscribe to notifications. 
- sub, unsubscribe := notifs.Sub() + sub, unsubscribe, ok := notifs.Sub() + require.True(t, ok) var wg sync.WaitGroup wg.Add(1) @@ -190,3 +194,30 @@ func TestUnsubscribe(t *testing.T) { require.Len(t, receivedNotifications, 1, "Expected 1 notification before unsubscribe.") require.Equal(t, "Test Notification 1", receivedNotifications[0].Text, "Unexpected notification text.") } + +// TestMaxSubscribers tests that exceeding the max subscribers limit prevents additional subscriptions. +func TestMaxSubscribers(t *testing.T) { + maxSubscribers := 2 + notifs := NewNotifications(maxSubscribers, nil) + + // Subscribe the maximum number of subscribers. + _, unsubscribe1, ok1 := notifs.Sub() + require.True(t, ok1, "Expected first subscription to succeed.") + + _, unsubscribe2, ok2 := notifs.Sub() + require.True(t, ok2, "Expected second subscription to succeed.") + + // Try to subscribe more than the max allowed. + _, _, ok3 := notifs.Sub() + require.False(t, ok3, "Expected third subscription to fail due to max subscriber limit.") + + // Unsubscribe one subscriber and try again. + unsubscribe1() + + _, unsubscribe4, ok4 := notifs.Sub() + require.True(t, ok4, "Expected subscription to succeed after unsubscribing a subscriber.") + + // Clean up the subscriptions. 
+ unsubscribe2() + unsubscribe4() +} diff --git a/web/api/v1/api.go b/web/api/v1/api.go index 5eadbdbe7..4589e14e0 100644 --- a/web/api/v1/api.go +++ b/web/api/v1/api.go @@ -215,7 +215,7 @@ type API struct { isAgent bool statsRenderer StatsRenderer notificationsGetter func() []api.Notification - notificationsSub func() (<-chan api.Notification, func()) + notificationsSub func() (<-chan api.Notification, func(), bool) remoteWriteHandler http.Handler remoteReadHandler http.Handler @@ -250,7 +250,7 @@ func NewAPI( runtimeInfo func() (RuntimeInfo, error), buildInfo *PrometheusVersion, notificationsGetter func() []api.Notification, - notificationsSub func() (<-chan api.Notification, func()), + notificationsSub func() (<-chan api.Notification, func(), bool), gatherer prometheus.Gatherer, registerer prometheus.Registerer, statsRenderer StatsRenderer, @@ -1690,7 +1690,11 @@ func (api *API) notificationsSSE(w http.ResponseWriter, r *http.Request) { w.Header().Set("Connection", "keep-alive") // Subscribe to notifications. - notifications, unsubscribe := api.notificationsSub() + notifications, unsubscribe, ok := api.notificationsSub() + if !ok { + w.WriteHeader(http.StatusNoContent) + return + } defer unsubscribe() // Set up a flusher to push the response to the client. diff --git a/web/ui/mantine-ui/src/components/NotificationsProvider.tsx b/web/ui/mantine-ui/src/components/NotificationsProvider.tsx index 73de54131..44510061e 100644 --- a/web/ui/mantine-ui/src/components/NotificationsProvider.tsx +++ b/web/ui/mantine-ui/src/components/NotificationsProvider.tsx @@ -42,7 +42,8 @@ export const NotificationsProvider: React.FC<{ children: React.ReactNode }> = ({ eventSource.onerror = () => { eventSource.close(); - setIsConnectionError(true); + // We do not call setIsConnectionError(true), we only set it to true if + // the fallback API does not work either. 
setShouldFetchFromAPI(true); }; diff --git a/web/web.go b/web/web.go index 87e4164c5..724ca9105 100644 --- a/web/web.go +++ b/web/web.go @@ -268,7 +268,7 @@ type Options struct { Notifier *notifier.Manager Version *PrometheusVersion NotificationsGetter func() []api.Notification - NotificationsSub func() (<-chan api.Notification, func()) + NotificationsSub func() (<-chan api.Notification, func(), bool) Flags map[string]string ListenAddresses []string From e34563bfe0ac78d81d3147aed9e03789945e1c74 Mon Sep 17 00:00:00 2001 From: Julien Date: Fri, 27 Sep 2024 15:58:41 +0200 Subject: [PATCH 18/74] Retry SSE connection unless max clients have been reached. This switches from the prehistoric EventSource API to the more modern fetch-event-source package. That packages gives us full control over the retries. It also gives us the opportunity to close the event source when the browser tab is hidden, saving resources. Signed-off-by: Julien --- web/api/v1/api.go | 4 ++ web/ui/mantine-ui/package.json | 1 + .../src/components/NotificationsProvider.tsx | 57 ++++++++++++------- web/ui/package-lock.json | 6 ++ 4 files changed, 48 insertions(+), 20 deletions(-) diff --git a/web/api/v1/api.go b/web/api/v1/api.go index 4589e14e0..d3cc7d718 100644 --- a/web/api/v1/api.go +++ b/web/api/v1/api.go @@ -1704,6 +1704,10 @@ func (api *API) notificationsSSE(w http.ResponseWriter, r *http.Request) { return } + // Flush the response to ensure the headers are immediately and eventSource + // onopen is triggered client-side. 
+ flusher.Flush() + for { select { case notification := <-notifications: diff --git a/web/ui/mantine-ui/package.json b/web/ui/mantine-ui/package.json index ec8ef8902..aae8ba99b 100644 --- a/web/ui/mantine-ui/package.json +++ b/web/ui/mantine-ui/package.json @@ -25,6 +25,7 @@ "@mantine/dates": "^7.11.2", "@mantine/hooks": "^7.11.2", "@mantine/notifications": "^7.11.2", + "@microsoft/fetch-event-source": "^2.0.1", "@nexucis/fuzzy": "^0.5.1", "@nexucis/kvsearch": "^0.9.1", "@prometheus-io/codemirror-promql": "0.300.0-beta.0", diff --git a/web/ui/mantine-ui/src/components/NotificationsProvider.tsx b/web/ui/mantine-ui/src/components/NotificationsProvider.tsx index 44510061e..a331e524b 100644 --- a/web/ui/mantine-ui/src/components/NotificationsProvider.tsx +++ b/web/ui/mantine-ui/src/components/NotificationsProvider.tsx @@ -3,6 +3,7 @@ import { useSettings } from '../state/settingsSlice'; import { NotificationsContext } from '../state/useNotifications'; import { Notification, NotificationsResult } from "../api/responseTypes/notifications"; import { useAPIQuery } from '../api/api'; +import { fetchEventSource } from '@microsoft/fetch-event-source'; export const NotificationsProvider: React.FC<{ children: React.ReactNode }> = ({ children }) => { const { pathPrefix } = useSettings(); @@ -24,31 +25,47 @@ export const NotificationsProvider: React.FC<{ children: React.ReactNode }> = ({ }, [data, isError]); useEffect(() => { - const eventSource = new EventSource(`${pathPrefix}/api/v1/notifications/live`); - - eventSource.onmessage = (event) => { - const notification: Notification = JSON.parse(event.data); - - setNotifications((prev: Notification[]) => { - const updatedNotifications = [...prev.filter((n: Notification) => n.text !== notification.text)]; - - if (notification.active) { - updatedNotifications.push(notification); + const controller = new AbortController(); + fetchEventSource(`${pathPrefix}/api/v1/notifications/live`, { + signal: controller.signal, + async 
onopen(response) { + if (response.ok) { + if (response.status === 200) { + setNotifications([]); + setIsConnectionError(false); + } else if (response.status === 204) { + controller.abort(); + setShouldFetchFromAPI(true); + } + } else { + setIsConnectionError(true); + throw new Error(`Unexpected response: ${response.status} ${response.statusText}`); } + }, + onmessage(event) { + const notification: Notification = JSON.parse(event.data); - return updatedNotifications; - }); - }; + setNotifications((prev: Notification[]) => { + const updatedNotifications = [...prev.filter((n: Notification) => n.text !== notification.text)]; - eventSource.onerror = () => { - eventSource.close(); - // We do not call setIsConnectionError(true), we only set it to true if - // the fallback API does not work either. - setShouldFetchFromAPI(true); - }; + if (notification.active) { + updatedNotifications.push(notification); + } + + return updatedNotifications; + }); + }, + onclose() { + throw new Error("Server closed the connection"); + }, + onerror() { + setIsConnectionError(true); + return 5000; + }, + }); return () => { - eventSource.close(); + controller.abort(); }; }, [pathPrefix]); diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index 2dc1fcdfe..49a907480 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -39,6 +39,7 @@ "@mantine/dates": "^7.11.2", "@mantine/hooks": "^7.11.2", "@mantine/notifications": "^7.11.2", + "@microsoft/fetch-event-source": "^2.0.1", "@nexucis/fuzzy": "^0.5.1", "@nexucis/kvsearch": "^0.9.1", "@prometheus-io/codemirror-promql": "0.300.0-beta.0", @@ -2255,6 +2256,11 @@ "react": "^18.2.0" } }, + "node_modules/@microsoft/fetch-event-source": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/@microsoft/fetch-event-source/-/fetch-event-source-2.0.1.tgz", + "integrity": "sha512-W6CLUJ2eBMw3Rec70qrsEW0jOm/3twwJv21mrmj2yORiaVmVYGS4sSS5yUwvQc1ZlDLYGPnClVWmUUMagKNsfA==" + }, "node_modules/@nexucis/fuzzy": { "version": 
"0.5.1", "resolved": "https://registry.npmjs.org/@nexucis/fuzzy/-/fuzzy-0.5.1.tgz", From 105ab2e95afa786c7ad21be614a31ff45c143cbb Mon Sep 17 00:00:00 2001 From: Ayoub Mrini Date: Fri, 27 Sep 2024 18:13:51 +0200 Subject: [PATCH 19/74] fix(test): adjust defer invocations (#14996) Signed-off-by: machine424 --- storage/remote/read_test.go | 4 +++- tsdb/agent/db_test.go | 4 +++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/storage/remote/read_test.go b/storage/remote/read_test.go index d63cefc3f..b78a8c621 100644 --- a/storage/remote/read_test.go +++ b/storage/remote/read_test.go @@ -475,7 +475,9 @@ func TestSampleAndChunkQueryableClient(t *testing.T) { ) q, err := c.Querier(tc.mint, tc.maxt) require.NoError(t, err) - defer require.NoError(t, q.Close()) + defer func() { + require.NoError(t, q.Close()) + }() ss := q.Select(context.Background(), true, nil, tc.matchers...) require.NoError(t, err) diff --git a/tsdb/agent/db_test.go b/tsdb/agent/db_test.go index b31041b1b..f940e1915 100644 --- a/tsdb/agent/db_test.go +++ b/tsdb/agent/db_test.go @@ -640,7 +640,9 @@ func Test_ExistingWAL_NextRef(t *testing.T) { // Create a new storage and see what nextRef is initialized to. db, err = Open(log.NewNopLogger(), nil, rs, dbDir, DefaultOptions()) require.NoError(t, err) - defer require.NoError(t, db.Close()) + defer func() { + require.NoError(t, db.Close()) + }() require.Equal(t, uint64(seriesCount+histogramCount), db.nextRef.Load(), "nextRef should be equal to the number of series written across the entire WAL") } From 97f32191576f768393e08cef7448f8747344a77a Mon Sep 17 00:00:00 2001 From: machine424 Date: Fri, 27 Sep 2024 13:31:44 +0200 Subject: [PATCH 20/74] test(discovery): add a Configs test showing that the custom unmarshalling/marshalling is broken. This went under the radar because the utils are never called directly. 
We usually marshall/unmarshal Configs as embeded in a struct using UnmarshalYAMLWithInlineConfigs/MarshalYAMLWithInlineConfigs which bypasses Configs' custom UnmarshalYAML/MarshalYAML Signed-off-by: machine424 --- discovery/discovery_test.go | 36 ++++++++++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) create mode 100644 discovery/discovery_test.go diff --git a/discovery/discovery_test.go b/discovery/discovery_test.go new file mode 100644 index 000000000..af327195f --- /dev/null +++ b/discovery/discovery_test.go @@ -0,0 +1,36 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package discovery + +import ( + "testing" + + "github.com/stretchr/testify/require" + "gopkg.in/yaml.v2" +) + +func TestConfigsCustomUnMarshalMarshal(t *testing.T) { + input := `static_configs: +- targets: + - foo:1234 + - bar:4321 +` + cfg := &Configs{} + err := yaml.UnmarshalStrict([]byte(input), cfg) + require.NoError(t, err) + + output, err := yaml.Marshal(cfg) + require.NoError(t, err) + require.Equal(t, input, string(output)) +} From b5569c40708429a0feabdba24596d80b8617fd81 Mon Sep 17 00:00:00 2001 From: machine424 Date: Fri, 27 Sep 2024 13:40:26 +0200 Subject: [PATCH 21/74] fix(discovery): adjust how type is retrieved in Configs' MarshalYAML/UnmarshalYAML Signed-off-by: machine424 --- discovery/discovery.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/discovery/discovery.go b/discovery/discovery.go index a91faf6c8..9a83df409 100644 --- a/discovery/discovery.go +++ b/discovery/discovery.go @@ -109,7 +109,7 @@ func (c *Configs) SetDirectory(dir string) { // UnmarshalYAML implements yaml.Unmarshaler. func (c *Configs) UnmarshalYAML(unmarshal func(interface{}) error) error { - cfgTyp := getConfigType(configsType) + cfgTyp := reflect.StructOf(configFields) cfgPtr := reflect.New(cfgTyp) cfgVal := cfgPtr.Elem() @@ -124,7 +124,7 @@ func (c *Configs) UnmarshalYAML(unmarshal func(interface{}) error) error { // MarshalYAML implements yaml.Marshaler. func (c Configs) MarshalYAML() (interface{}, error) { - cfgTyp := getConfigType(configsType) + cfgTyp := reflect.StructOf(configFields) cfgPtr := reflect.New(cfgTyp) cfgVal := cfgPtr.Elem() From b826c43987639e481e8453c6ad99b7e258c63b53 Mon Sep 17 00:00:00 2001 From: Julius Volz Date: Mon, 30 Sep 2024 14:22:40 +0200 Subject: [PATCH 22/74] Calculate path prefix directly in initial settings Redux value (#14981) Without this, the page that is shown first renders once with an empty path prefix value, since the settings update takes a render cycle to complete. 
However, we only fetch certain data from the API exactly once for a given page, and not for every re-render with changed path prefix value (and we also wouldn't want to fetch it from the wrong location initially). This duplicates the served endpoint list once more, but exporting them from App.tsx would also have been dirty (hot reload only works when a file only exports one component and nothing else, thus there'd be a linter warning). Signed-off-by: Julius Volz --- web/ui/mantine-ui/src/App.tsx | 32 ++------------------ web/ui/mantine-ui/src/state/settingsSlice.ts | 30 ++++++++++++++++-- 2 files changed, 31 insertions(+), 31 deletions(-) diff --git a/web/ui/mantine-ui/src/App.tsx b/web/ui/mantine-ui/src/App.tsx index 3e3466825..3bec30fa3 100644 --- a/web/ui/mantine-ui/src/App.tsx +++ b/web/ui/mantine-ui/src/App.tsx @@ -56,12 +56,11 @@ import TSDBStatusPage from "./pages/TSDBStatusPage"; import FlagsPage from "./pages/FlagsPage"; import ConfigPage from "./pages/ConfigPage"; import AgentPage from "./pages/AgentPage"; -import { Suspense, useEffect } from "react"; +import { Suspense } from "react"; import ErrorBoundary from "./components/ErrorBoundary"; import { ThemeSelector } from "./components/ThemeSelector"; import { Notifications } from "@mantine/notifications"; -import { useAppDispatch } from "./state/hooks"; -import { updateSettings, useSettings } from "./state/settingsSlice"; +import { useSettings } from "./state/settingsSlice"; import SettingsMenu from "./components/SettingsMenu"; import ReadinessWrapper from "./components/ReadinessWrapper"; import NotificationsProvider from "./components/NotificationsProvider"; @@ -172,37 +171,12 @@ const theme = createTheme({ }, }); -// This dynamically/generically determines the pathPrefix by stripping the first known -// endpoint suffix from the window location path. It works out of the box for both direct -// hosting and reverse proxy deployments with no additional configurations required. 
-const getPathPrefix = (path: string) => { - if (path.endsWith("/")) { - path = path.slice(0, -1); - } - - const pagePaths = [ - ...mainNavPages, - ...allStatusPages, - { path: "/agent" }, - ].map((p) => p.path); - - const pagePath = pagePaths.find((p) => path.endsWith(p)); - return path.slice(0, path.length - (pagePath || "").length); -}; - const navLinkXPadding = "md"; function App() { const [opened, { toggle }] = useDisclosure(); - const pathPrefix = getPathPrefix(window.location.pathname); - const dispatch = useAppDispatch(); - - useEffect(() => { - dispatch(updateSettings({ pathPrefix })); - }, [pathPrefix, dispatch]); - - const { agentMode, consolesLink } = useSettings(); + const { agentMode, consolesLink, pathPrefix } = useSettings(); const navLinks = ( <> diff --git a/web/ui/mantine-ui/src/state/settingsSlice.ts b/web/ui/mantine-ui/src/state/settingsSlice.ts index 1591c4388..ea744e014 100644 --- a/web/ui/mantine-ui/src/state/settingsSlice.ts +++ b/web/ui/mantine-ui/src/state/settingsSlice.ts @@ -4,7 +4,7 @@ import { initializeFromLocalStorage } from "./initializeFromLocalStorage"; interface Settings { consolesLink: string | null; - lookbackDelta: string, + lookbackDelta: string; agentMode: boolean; ready: boolean; pathPrefix: string; @@ -30,6 +30,32 @@ export const localStorageKeyEnableSyntaxHighlighting = export const localStorageKeyEnableLinter = "settings.enableLinter"; export const localStorageKeyShowAnnotations = "settings.showAnnotations"; +// This dynamically/generically determines the pathPrefix by stripping the first known +// endpoint suffix from the window location path. It works out of the box for both direct +// hosting and reverse proxy deployments with no additional configurations required. 
+const getPathPrefix = (path: string) => { + if (path.endsWith("/")) { + path = path.slice(0, -1); + } + + const pagePaths = [ + "/query", + "/alerts", + "/targets", + "/rules", + "/service-discovery", + "/status", + "/tsdb-status", + "/flags", + "/config", + "/alertmanager-discovery", + "/agent", + ]; + + const pagePath = pagePaths.find((p) => path.endsWith(p)); + return path.slice(0, path.length - (pagePath || "").length); +}; + export const initialState: Settings = { consolesLink: GLOBAL_CONSOLES_LINK === "CONSOLES_LINK_PLACEHOLDER" || @@ -44,7 +70,7 @@ export const initialState: Settings = { GLOBAL_LOOKBACKDELTA === null ? "" : GLOBAL_LOOKBACKDELTA, - pathPrefix: "", + pathPrefix: getPathPrefix(window.location.pathname), useLocalTime: initializeFromLocalStorage( localStorageKeyUseLocalTime, false From 9bb7fab4ab8e31525984948defdee5d86c55e9d9 Mon Sep 17 00:00:00 2001 From: Levi Harrison Date: Mon, 30 Sep 2024 09:44:41 -0400 Subject: [PATCH 23/74] remove LeviHarrison as default maintainer (#15005) Signed-off-by: Levi Harrison --- MAINTAINERS.md | 1 - 1 file changed, 1 deletion(-) diff --git a/MAINTAINERS.md b/MAINTAINERS.md index 7f4153abc..44c07f063 100644 --- a/MAINTAINERS.md +++ b/MAINTAINERS.md @@ -2,7 +2,6 @@ General maintainers: * Bryan Boreham (bjboreham@gmail.com / @bboreham) -* Levi Harrison (levi@leviharrison.dev / @LeviHarrison) * Ayoub Mrini (ayoubmrini424@gmail.com / @machine424) * Julien Pivotto (roidelapluie@prometheus.io / @roidelapluie) From 7a90d73fa63e9ea5cab357951ffcf32c9bc8bfdf Mon Sep 17 00:00:00 2001 From: bas smit Date: Wed, 18 Sep 2024 10:38:47 +0200 Subject: [PATCH 24/74] sd k8s: test for sidecar container support in endpoints This test is expected to fail, the followup will add the feature Signed-off-by: bas smit --- discovery/kubernetes/endpoints_test.go | 164 +++++++++++++++++++++ discovery/kubernetes/endpointslice_test.go | 162 ++++++++++++++++++++ 2 files changed, 326 insertions(+) diff --git a/discovery/kubernetes/endpoints_test.go 
b/discovery/kubernetes/endpoints_test.go index 3ea98c5db..c503448b2 100644 --- a/discovery/kubernetes/endpoints_test.go +++ b/discovery/kubernetes/endpoints_test.go @@ -1089,3 +1089,167 @@ func TestEndpointsDiscoveryUpdatePod(t *testing.T) { }, }.Run(t) } + +func TestEndpointsDiscoverySidecarContainer(t *testing.T) { + objs := []runtime.Object{ + &v1.Endpoints{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testsidecar", + Namespace: "default", + }, + Subsets: []v1.EndpointSubset{ + { + Addresses: []v1.EndpointAddress{ + { + IP: "4.3.2.1", + TargetRef: &v1.ObjectReference{ + Kind: "Pod", + Name: "testpod", + Namespace: "default", + }, + }, + }, + Ports: []v1.EndpointPort{ + { + Name: "testport", + Port: 9000, + Protocol: v1.ProtocolTCP, + }, + { + Name: "initport", + Port: 9111, + Protocol: v1.ProtocolTCP, + }, + }, + }, + }, + }, + &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testpod", + Namespace: "default", + UID: types.UID("deadbeef"), + }, + Spec: v1.PodSpec{ + NodeName: "testnode", + InitContainers: []v1.Container{ + { + Name: "ic1", + Image: "ic1:latest", + Ports: []v1.ContainerPort{ + { + Name: "initport", + ContainerPort: 1111, + Protocol: v1.ProtocolTCP, + }, + }, + }, + { + Name: "ic2", + Image: "ic2:latest", + Ports: []v1.ContainerPort{ + { + Name: "initport", + ContainerPort: 9111, + Protocol: v1.ProtocolTCP, + }, + }, + }, + }, + Containers: []v1.Container{ + { + Name: "c1", + Image: "c1:latest", + Ports: []v1.ContainerPort{ + { + Name: "mainport", + ContainerPort: 9000, + Protocol: v1.ProtocolTCP, + }, + }, + }, + }, + }, + Status: v1.PodStatus{ + HostIP: "2.3.4.5", + PodIP: "4.3.2.1", + }, + }, + } + + n, _ := makeDiscovery(RoleEndpoint, NamespaceDiscovery{}, objs...) 
+ + k8sDiscoveryTest{ + discovery: n, + expectedMaxItems: 1, + expectedRes: map[string]*targetgroup.Group{ + "endpoints/default/testsidecar": { + Targets: []model.LabelSet{ + { + "__address__": "4.3.2.1:9000", + "__meta_kubernetes_endpoint_address_target_kind": "Pod", + "__meta_kubernetes_endpoint_address_target_name": "testpod", + "__meta_kubernetes_endpoint_port_name": "testport", + "__meta_kubernetes_endpoint_port_protocol": "TCP", + "__meta_kubernetes_endpoint_ready": "true", + "__meta_kubernetes_pod_container_image": "c1:latest", + "__meta_kubernetes_pod_container_name": "c1", + "__meta_kubernetes_pod_container_port_name": "mainport", + "__meta_kubernetes_pod_container_port_number": "9000", + "__meta_kubernetes_pod_container_port_protocol": "TCP", + "__meta_kubernetes_pod_host_ip": "2.3.4.5", + "__meta_kubernetes_pod_ip": "4.3.2.1", + "__meta_kubernetes_pod_name": "testpod", + "__meta_kubernetes_pod_node_name": "testnode", + "__meta_kubernetes_pod_phase": "", + "__meta_kubernetes_pod_ready": "unknown", + "__meta_kubernetes_pod_uid": "deadbeef", + "__meta_kubernetes_pod_container_init": "false", + }, + { + "__address__": "4.3.2.1:9111", + "__meta_kubernetes_endpoint_address_target_kind": "Pod", + "__meta_kubernetes_endpoint_address_target_name": "testpod", + "__meta_kubernetes_endpoint_port_name": "initport", + "__meta_kubernetes_endpoint_port_protocol": "TCP", + "__meta_kubernetes_endpoint_ready": "true", + "__meta_kubernetes_pod_container_image": "ic2:latest", + "__meta_kubernetes_pod_container_name": "ic2", + "__meta_kubernetes_pod_container_port_name": "initport", + "__meta_kubernetes_pod_container_port_number": "9111", + "__meta_kubernetes_pod_container_port_protocol": "TCP", + "__meta_kubernetes_pod_host_ip": "2.3.4.5", + "__meta_kubernetes_pod_ip": "4.3.2.1", + "__meta_kubernetes_pod_name": "testpod", + "__meta_kubernetes_pod_node_name": "testnode", + "__meta_kubernetes_pod_phase": "", + "__meta_kubernetes_pod_ready": "unknown", + 
"__meta_kubernetes_pod_uid": "deadbeef", + "__meta_kubernetes_pod_container_init": "true", + }, + { + "__address__": "4.3.2.1:1111", + "__meta_kubernetes_pod_container_image": "ic1:latest", + "__meta_kubernetes_pod_container_name": "ic1", + "__meta_kubernetes_pod_container_port_name": "initport", + "__meta_kubernetes_pod_container_port_number": "1111", + "__meta_kubernetes_pod_container_port_protocol": "TCP", + "__meta_kubernetes_pod_host_ip": "2.3.4.5", + "__meta_kubernetes_pod_ip": "4.3.2.1", + "__meta_kubernetes_pod_name": "testpod", + "__meta_kubernetes_pod_node_name": "testnode", + "__meta_kubernetes_pod_phase": "", + "__meta_kubernetes_pod_ready": "unknown", + "__meta_kubernetes_pod_uid": "deadbeef", + "__meta_kubernetes_pod_container_init": "true", + }, + }, + Labels: model.LabelSet{ + "__meta_kubernetes_endpoints_name": "testsidecar", + "__meta_kubernetes_namespace": "default", + }, + Source: "endpoints/default/testsidecar", + }, + }, + }.Run(t) +} diff --git a/discovery/kubernetes/endpointslice_test.go b/discovery/kubernetes/endpointslice_test.go index c7e99b0a0..f7ecf994e 100644 --- a/discovery/kubernetes/endpointslice_test.go +++ b/discovery/kubernetes/endpointslice_test.go @@ -1199,3 +1199,165 @@ func TestEndpointSliceInfIndexersCount(t *testing.T) { }) } } + +func TestEndpointSliceDiscoverySidecarContainer(t *testing.T) { + objs := []runtime.Object{ + &v1.EndpointSlice{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testsidecar", + Namespace: "default", + }, + AddressType: v1.AddressTypeIPv4, + Ports: []v1.EndpointPort{ + { + Name: strptr("testport"), + Port: int32ptr(9000), + Protocol: protocolptr(corev1.ProtocolTCP), + }, + { + Name: strptr("initport"), + Port: int32ptr(9111), + Protocol: protocolptr(corev1.ProtocolTCP), + }, + }, + Endpoints: []v1.Endpoint{ + { + Addresses: []string{"4.3.2.1"}, + TargetRef: &corev1.ObjectReference{ + Kind: "Pod", + Name: "testpod", + Namespace: "default", + }, + }, + }, + }, + &corev1.Pod{ + ObjectMeta: 
metav1.ObjectMeta{ + Name: "testpod", + Namespace: "default", + UID: types.UID("deadbeef"), + }, + Spec: corev1.PodSpec{ + NodeName: "testnode", + InitContainers: []corev1.Container{ + { + Name: "ic1", + Image: "ic1:latest", + Ports: []corev1.ContainerPort{ + { + Name: "initport", + ContainerPort: 1111, + Protocol: corev1.ProtocolTCP, + }, + }, + }, + { + Name: "ic2", + Image: "ic2:latest", + Ports: []corev1.ContainerPort{ + { + Name: "initport", + ContainerPort: 9111, + Protocol: corev1.ProtocolTCP, + }, + }, + }, + }, + Containers: []corev1.Container{ + { + Name: "c1", + Image: "c1:latest", + Ports: []corev1.ContainerPort{ + { + Name: "mainport", + ContainerPort: 9000, + Protocol: corev1.ProtocolTCP, + }, + }, + }, + }, + }, + Status: corev1.PodStatus{ + HostIP: "2.3.4.5", + PodIP: "4.3.2.1", + }, + }, + } + + n, _ := makeDiscovery(RoleEndpointSlice, NamespaceDiscovery{}, objs...) + + k8sDiscoveryTest{ + discovery: n, + expectedMaxItems: 1, + expectedRes: map[string]*targetgroup.Group{ + "endpointslice/default/testsidecar": { + Targets: []model.LabelSet{ + { + "__address__": "4.3.2.1:9000", + "__meta_kubernetes_endpointslice_address_target_kind": "Pod", + "__meta_kubernetes_endpointslice_address_target_name": "testpod", + "__meta_kubernetes_endpointslice_port": "9000", + "__meta_kubernetes_endpointslice_port_name": "testport", + "__meta_kubernetes_endpointslice_port_protocol": "TCP", + "__meta_kubernetes_pod_container_image": "c1:latest", + "__meta_kubernetes_pod_container_name": "c1", + "__meta_kubernetes_pod_container_port_name": "mainport", + "__meta_kubernetes_pod_container_port_number": "9000", + "__meta_kubernetes_pod_container_port_protocol": "TCP", + "__meta_kubernetes_pod_host_ip": "2.3.4.5", + "__meta_kubernetes_pod_ip": "4.3.2.1", + "__meta_kubernetes_pod_name": "testpod", + "__meta_kubernetes_pod_node_name": "testnode", + "__meta_kubernetes_pod_phase": "", + "__meta_kubernetes_pod_ready": "unknown", + "__meta_kubernetes_pod_uid": "deadbeef", + 
"__meta_kubernetes_pod_container_init": "false", + }, + { + "__address__": "4.3.2.1:9111", + "__meta_kubernetes_endpointslice_address_target_kind": "Pod", + "__meta_kubernetes_endpointslice_address_target_name": "testpod", + "__meta_kubernetes_endpointslice_port": "9111", + "__meta_kubernetes_endpointslice_port_name": "initport", + "__meta_kubernetes_endpointslice_port_protocol": "TCP", + "__meta_kubernetes_pod_container_image": "ic2:latest", + "__meta_kubernetes_pod_container_name": "ic2", + "__meta_kubernetes_pod_container_port_name": "initport", + "__meta_kubernetes_pod_container_port_number": "9111", + "__meta_kubernetes_pod_container_port_protocol": "TCP", + "__meta_kubernetes_pod_host_ip": "2.3.4.5", + "__meta_kubernetes_pod_ip": "4.3.2.1", + "__meta_kubernetes_pod_name": "testpod", + "__meta_kubernetes_pod_node_name": "testnode", + "__meta_kubernetes_pod_phase": "", + "__meta_kubernetes_pod_ready": "unknown", + "__meta_kubernetes_pod_uid": "deadbeef", + "__meta_kubernetes_pod_container_init": "true", + }, + { + "__address__": "4.3.2.1:1111", + "__meta_kubernetes_pod_container_image": "ic1:latest", + "__meta_kubernetes_pod_container_name": "ic1", + "__meta_kubernetes_pod_container_port_name": "initport", + "__meta_kubernetes_pod_container_port_number": "1111", + "__meta_kubernetes_pod_container_port_protocol": "TCP", + "__meta_kubernetes_pod_host_ip": "2.3.4.5", + "__meta_kubernetes_pod_ip": "4.3.2.1", + "__meta_kubernetes_pod_name": "testpod", + "__meta_kubernetes_pod_node_name": "testnode", + "__meta_kubernetes_pod_phase": "", + "__meta_kubernetes_pod_ready": "unknown", + "__meta_kubernetes_pod_uid": "deadbeef", + "__meta_kubernetes_pod_container_init": "true", + }, + }, + Labels: model.LabelSet{ + "__meta_kubernetes_endpointslice_address_type": "IPv4", + "__meta_kubernetes_endpointslice_name": "testsidecar", + "__meta_kubernetes_namespace": "default", + }, + Source: "endpointslice/default/testsidecar", + }, + }, + }.Run(t) +} From 
a10dc9298ecfe8e5b03b38f31232df83412c6ed5 Mon Sep 17 00:00:00 2001 From: bas smit Date: Wed, 18 Sep 2024 10:40:18 +0200 Subject: [PATCH 25/74] sd k8s: support sidecar containers in endpoint discovery Sidecar containers are a newish feature in k8s. They're implemented similar to init containers but actually stay running and allow you to delay startup of your application pod until the sidecar started (like init containers always do). This adds the ports of the sidecar container to the list of discovered endpoint(slice), allowing you to target those containers as well. The implementation is a copy of that of Pod discovery fixes: #14927 Signed-off-by: bas smit --- discovery/kubernetes/endpoints.go | 10 ++++++++-- discovery/kubernetes/endpointslice.go | 11 +++++++++-- 2 files changed, 17 insertions(+), 4 deletions(-) diff --git a/discovery/kubernetes/endpoints.go b/discovery/kubernetes/endpoints.go index c7a60ae6d..542bc95ed 100644 --- a/discovery/kubernetes/endpoints.go +++ b/discovery/kubernetes/endpoints.go @@ -361,16 +361,19 @@ func (e *Endpoints) buildEndpoints(eps *apiv1.Endpoints) *targetgroup.Group { target = target.Merge(podLabels(pod)) // Attach potential container port labels matching the endpoint port. - for _, c := range pod.Spec.Containers { + containers := append(pod.Spec.Containers, pod.Spec.InitContainers...) 
+ for i, c := range containers { for _, cport := range c.Ports { if port.Port == cport.ContainerPort { ports := strconv.FormatUint(uint64(port.Port), 10) + isInit := i >= len(pod.Spec.Containers) target[podContainerNameLabel] = lv(c.Name) target[podContainerImageLabel] = lv(c.Image) target[podContainerPortNameLabel] = lv(cport.Name) target[podContainerPortNumberLabel] = lv(ports) target[podContainerPortProtocolLabel] = lv(string(port.Protocol)) + target[podContainerIsInit] = lv(strconv.FormatBool(isInit)) break } } @@ -411,7 +414,8 @@ func (e *Endpoints) buildEndpoints(eps *apiv1.Endpoints) *targetgroup.Group { continue } - for _, c := range pe.pod.Spec.Containers { + containers := append(pe.pod.Spec.Containers, pe.pod.Spec.InitContainers...) + for i, c := range containers { for _, cport := range c.Ports { hasSeenPort := func() bool { for _, eport := range pe.servicePorts { @@ -428,6 +432,7 @@ func (e *Endpoints) buildEndpoints(eps *apiv1.Endpoints) *targetgroup.Group { a := net.JoinHostPort(pe.pod.Status.PodIP, strconv.FormatUint(uint64(cport.ContainerPort), 10)) ports := strconv.FormatUint(uint64(cport.ContainerPort), 10) + isInit := i >= len(pe.pod.Spec.Containers) target := model.LabelSet{ model.AddressLabel: lv(a), podContainerNameLabel: lv(c.Name), @@ -435,6 +440,7 @@ func (e *Endpoints) buildEndpoints(eps *apiv1.Endpoints) *targetgroup.Group { podContainerPortNameLabel: lv(cport.Name), podContainerPortNumberLabel: lv(ports), podContainerPortProtocolLabel: lv(string(cport.Protocol)), + podContainerIsInit: lv(strconv.FormatBool(isInit)), } tg.Targets = append(tg.Targets, target.Merge(podLabels(pe.pod))) } diff --git a/discovery/kubernetes/endpointslice.go b/discovery/kubernetes/endpointslice.go index 2ac65ef41..136830310 100644 --- a/discovery/kubernetes/endpointslice.go +++ b/discovery/kubernetes/endpointslice.go @@ -377,19 +377,23 @@ func (e *EndpointSlice) buildEndpointSlice(eps endpointSliceAdaptor) *targetgrou target = target.Merge(podLabels(pod)) // 
Attach potential container port labels matching the endpoint port. - for _, c := range pod.Spec.Containers { + containers := append(pod.Spec.Containers, pod.Spec.InitContainers...) + for i, c := range containers { for _, cport := range c.Ports { if port.port() == nil { continue } + if *port.port() == cport.ContainerPort { ports := strconv.FormatUint(uint64(*port.port()), 10) + isInit := i >= len(pod.Spec.Containers) target[podContainerNameLabel] = lv(c.Name) target[podContainerImageLabel] = lv(c.Image) target[podContainerPortNameLabel] = lv(cport.Name) target[podContainerPortNumberLabel] = lv(ports) target[podContainerPortProtocolLabel] = lv(string(cport.Protocol)) + target[podContainerIsInit] = lv(strconv.FormatBool(isInit)) break } } @@ -417,7 +421,8 @@ func (e *EndpointSlice) buildEndpointSlice(eps endpointSliceAdaptor) *targetgrou continue } - for _, c := range pe.pod.Spec.Containers { + containers := append(pe.pod.Spec.Containers, pe.pod.Spec.InitContainers...) + for i, c := range containers { for _, cport := range c.Ports { hasSeenPort := func() bool { for _, eport := range pe.servicePorts { @@ -437,6 +442,7 @@ func (e *EndpointSlice) buildEndpointSlice(eps endpointSliceAdaptor) *targetgrou a := net.JoinHostPort(pe.pod.Status.PodIP, strconv.FormatUint(uint64(cport.ContainerPort), 10)) ports := strconv.FormatUint(uint64(cport.ContainerPort), 10) + isInit := i >= len(pe.pod.Spec.Containers) target := model.LabelSet{ model.AddressLabel: lv(a), podContainerNameLabel: lv(c.Name), @@ -444,6 +450,7 @@ func (e *EndpointSlice) buildEndpointSlice(eps endpointSliceAdaptor) *targetgrou podContainerPortNameLabel: lv(cport.Name), podContainerPortNumberLabel: lv(ports), podContainerPortProtocolLabel: lv(string(cport.Protocol)), + podContainerIsInit: lv(strconv.FormatBool(isInit)), } tg.Targets = append(tg.Targets, target.Merge(podLabels(pe.pod))) } From 73997289c3b2d4b2ec234e4e16541559a9b90f6e Mon Sep 17 00:00:00 2001 From: bas smit Date: Wed, 18 Sep 2024 23:21:31 +0200 
Subject: [PATCH 26/74] tests: update discovery tests with new labael Previous commit added the pod_container_init label to discovery, so all the tests need to reflect that. Signed-off-by: bas smit --- discovery/kubernetes/endpoints_test.go | 4 ++++ discovery/kubernetes/endpointslice_test.go | 3 +++ 2 files changed, 7 insertions(+) diff --git a/discovery/kubernetes/endpoints_test.go b/discovery/kubernetes/endpoints_test.go index c503448b2..4af688960 100644 --- a/discovery/kubernetes/endpoints_test.go +++ b/discovery/kubernetes/endpoints_test.go @@ -244,6 +244,7 @@ func TestEndpointsDiscoveryAdd(t *testing.T) { "__meta_kubernetes_pod_container_port_number": "9000", "__meta_kubernetes_pod_container_port_protocol": "TCP", "__meta_kubernetes_pod_uid": "deadbeef", + "__meta_kubernetes_pod_container_init": "false", }, { "__address__": "1.2.3.4:9001", @@ -259,6 +260,7 @@ func TestEndpointsDiscoveryAdd(t *testing.T) { "__meta_kubernetes_pod_container_port_number": "9001", "__meta_kubernetes_pod_container_port_protocol": "TCP", "__meta_kubernetes_pod_uid": "deadbeef", + "__meta_kubernetes_pod_container_init": "false", }, }, Labels: model.LabelSet{ @@ -821,6 +823,7 @@ func TestEndpointsDiscoveryNamespaces(t *testing.T) { "__meta_kubernetes_pod_container_port_number": "9000", "__meta_kubernetes_pod_container_port_protocol": "TCP", "__meta_kubernetes_pod_uid": "deadbeef", + "__meta_kubernetes_pod_container_init": "false", }, }, Labels: model.LabelSet{ @@ -1078,6 +1081,7 @@ func TestEndpointsDiscoveryUpdatePod(t *testing.T) { "__meta_kubernetes_pod_container_port_number": "9000", "__meta_kubernetes_pod_container_port_protocol": "TCP", "__meta_kubernetes_pod_uid": "deadbeef", + "__meta_kubernetes_pod_container_init": "false", }, }, Labels: model.LabelSet{ diff --git a/discovery/kubernetes/endpointslice_test.go b/discovery/kubernetes/endpointslice_test.go index f7ecf994e..cc92c7dda 100644 --- a/discovery/kubernetes/endpointslice_test.go +++ 
b/discovery/kubernetes/endpointslice_test.go @@ -291,6 +291,7 @@ func TestEndpointSliceDiscoveryAdd(t *testing.T) { "__meta_kubernetes_pod_phase": "", "__meta_kubernetes_pod_ready": "unknown", "__meta_kubernetes_pod_uid": "deadbeef", + "__meta_kubernetes_pod_container_init": "false", }, { "__address__": "1.2.3.4:9001", @@ -306,6 +307,7 @@ func TestEndpointSliceDiscoveryAdd(t *testing.T) { "__meta_kubernetes_pod_phase": "", "__meta_kubernetes_pod_ready": "unknown", "__meta_kubernetes_pod_uid": "deadbeef", + "__meta_kubernetes_pod_container_init": "false", }, }, Labels: model.LabelSet{ @@ -986,6 +988,7 @@ func TestEndpointSliceDiscoveryNamespaces(t *testing.T) { "__meta_kubernetes_pod_phase": "", "__meta_kubernetes_pod_ready": "unknown", "__meta_kubernetes_pod_uid": "deadbeef", + "__meta_kubernetes_pod_container_init": "false", }, }, Labels: model.LabelSet{ From 77d3b3aff3817ec27013a094dd0a5aab64626eed Mon Sep 17 00:00:00 2001 From: Jesus Vazquez Date: Tue, 1 Oct 2024 14:36:52 +0200 Subject: [PATCH 27/74] OTLP: Remove experimental word form OTLP receiver (#14894) The OTLP receiver can now considered stable. We've had it for longer than a year in main and has received constant improvements. 
Signed-off-by: Jesus Vazquez --- cmd/prometheus/main.go | 8 ++++---- docs/command-line/prometheus.md | 3 ++- docs/feature_flags.md | 8 -------- docs/querying/api.md | 4 ++-- web/api/v1/api.go | 2 +- 5 files changed, 9 insertions(+), 16 deletions(-) diff --git a/cmd/prometheus/main.go b/cmd/prometheus/main.go index f39eba3c3..d8369770b 100644 --- a/cmd/prometheus/main.go +++ b/cmd/prometheus/main.go @@ -182,9 +182,6 @@ func (c *flagConfig) setFeatureListOptions(logger log.Logger) error { opts := strings.Split(f, ",") for _, o := range opts { switch o { - case "otlp-write-receiver": - c.web.EnableOTLPWriteReceiver = true - level.Info(logger).Log("msg", "Experimental OTLP write receiver enabled") case "expand-external-labels": c.enableExpandExternalLabels = true level.Info(logger).Log("msg", "Experimental expand-external-labels enabled") @@ -345,6 +342,9 @@ func main() { a.Flag("web.remote-write-receiver.accepted-protobuf-messages", fmt.Sprintf("List of the remote write protobuf messages to accept when receiving the remote writes. Supported values: %v", supportedRemoteWriteProtoMsgs.String())). Default(supportedRemoteWriteProtoMsgs.Strings()...).SetValue(rwProtoMsgFlagValue(&cfg.web.AcceptRemoteWriteProtoMsgs)) + a.Flag("web.enable-otlp-receiver", "Enable API endpoint accepting OTLP write requests."). + Default("false").BoolVar(&cfg.web.EnableOTLPWriteReceiver) + a.Flag("web.console.templates", "Path to the console template directory, available at /consoles."). Default("consoles").StringVar(&cfg.web.ConsoleTemplatesPath) @@ -475,7 +475,7 @@ func main() { a.Flag("scrape.discovery-reload-interval", "Interval used by scrape manager to throttle target groups updates."). Hidden().Default("5s").SetValue(&cfg.scrape.DiscoveryReloadInterval) - a.Flag("enable-feature", "Comma separated feature names to enable. 
Valid options: auto-gomemlimit, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, extra-scrape-metrics, auto-gomaxprocs, native-histograms, otlp-write-receiver, created-timestamp-zero-ingestion, concurrent-rule-eval, delayed-compaction, old-ui. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details."). + a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: auto-gomemlimit, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, extra-scrape-metrics, auto-gomaxprocs, native-histograms, created-timestamp-zero-ingestion, concurrent-rule-eval, delayed-compaction, old-ui. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details."). Default("").StringsVar(&cfg.featureList) a.Flag("agent", "Run Prometheus in 'Agent mode'.").BoolVar(&agentMode) diff --git a/docs/command-line/prometheus.md b/docs/command-line/prometheus.md index eacb45ad0..a179a2f9f 100644 --- a/docs/command-line/prometheus.md +++ b/docs/command-line/prometheus.md @@ -29,6 +29,7 @@ The Prometheus monitoring server | --web.enable-admin-api | Enable API endpoints for admin control actions. | `false` | | --web.enable-remote-write-receiver | Enable API endpoint accepting remote write requests. | `false` | | --web.remote-write-receiver.accepted-protobuf-messages | List of the remote write protobuf messages to accept when receiving the remote writes. Supported values: prometheus.WriteRequest, io.prometheus.write.v2.Request | `prometheus.WriteRequest` | +| --web.enable-otlp-receiver | Enable API endpoint accepting OTLP write requests. | `false` | | --web.console.templates | Path to the console template directory, available at /consoles. | `consoles` | | --web.console.libraries | Path to the console library directory. 
| `console_libraries` | | --web.page-title | Document title of Prometheus instance. | `Prometheus Time Series Collection and Processing Server` | @@ -57,7 +58,7 @@ The Prometheus monitoring server | --query.timeout | Maximum time a query may take before being aborted. Use with server mode only. | `2m` | | --query.max-concurrency | Maximum number of queries executed concurrently. Use with server mode only. | `20` | | --query.max-samples | Maximum number of samples a single query can load into memory. Note that queries will fail if they try to load more samples than this into memory, so this also limits the number of samples a query can return. Use with server mode only. | `50000000` | -| --enable-feature ... | Comma separated feature names to enable. Valid options: auto-gomemlimit, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, extra-scrape-metrics, auto-gomaxprocs, native-histograms, otlp-write-receiver, created-timestamp-zero-ingestion, concurrent-rule-eval, delayed-compaction, old-ui. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details. | | +| --enable-feature ... | Comma separated feature names to enable. Valid options: auto-gomemlimit, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, extra-scrape-metrics, auto-gomaxprocs, native-histograms, created-timestamp-zero-ingestion, concurrent-rule-eval, delayed-compaction, old-ui. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details. | | | --agent | Run Prometheus in 'Agent mode'. | | | --log.level | Only log messages with the given severity or above. One of: [debug, info, warn, error] | `info` | | --log.format | Output format of log messages. 
One of: [logfmt, json] | `logfmt` | diff --git a/docs/feature_flags.md b/docs/feature_flags.md index 1e9455a3f..a3e2c0b9e 100644 --- a/docs/feature_flags.md +++ b/docs/feature_flags.md @@ -147,14 +147,6 @@ This should **only** be applied to metrics that currently produce such labels. regex: (\d+)\.0+;.*_bucket ``` -## OTLP Receiver - -`--enable-feature=otlp-write-receiver` - -The OTLP receiver allows Prometheus to accept [OpenTelemetry](https://opentelemetry.io/) metrics writes. -Prometheus is best used as a Pull based system, and staleness, `up` metric, and other Pull enabled features -won't work when you push OTLP metrics. - ## Experimental PromQL functions `--enable-feature=promql-experimental-functions` diff --git a/docs/querying/api.md b/docs/querying/api.md index e32c8ecaf..714438398 100644 --- a/docs/querying/api.md +++ b/docs/querying/api.md @@ -1388,8 +1388,8 @@ is not considered an efficient way of ingesting samples. Use it with caution for specific low-volume use cases. It is not suitable for replacing the ingestion via scraping. -Enable the OTLP receiver by the feature flag -`--enable-feature=otlp-write-receiver`. When enabled, the OTLP receiver +Enable the OTLP receiver by setting +`--web.enable-otlp-receiver`. When enabled, the OTLP receiver endpoint is `/api/v1/otlp/v1/metrics`. 
*New in v2.47* diff --git a/web/api/v1/api.go b/web/api/v1/api.go index d3cc7d718..0279f727f 100644 --- a/web/api/v1/api.go +++ b/web/api/v1/api.go @@ -1750,7 +1750,7 @@ func (api *API) otlpWrite(w http.ResponseWriter, r *http.Request) { if api.otlpWriteHandler != nil { api.otlpWriteHandler.ServeHTTP(w, r) } else { - http.Error(w, "otlp write receiver needs to be enabled with --enable-feature=otlp-write-receiver", http.StatusNotFound) + http.Error(w, "otlp write receiver needs to be enabled with --web.enable-otlp-receiver", http.StatusNotFound) } } From c5c2566b8afcb77ec559d64afe10d2f7daa18236 Mon Sep 17 00:00:00 2001 From: Jesus Vazquez Date: Tue, 1 Oct 2024 15:15:21 +0200 Subject: [PATCH 28/74] MAINTAINERS: Add Arthur as an otlptranslator maintainer (#15024) Signed-off-by: Jesus Vazquez --- MAINTAINERS.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/MAINTAINERS.md b/MAINTAINERS.md index 44c07f063..de3f3c73b 100644 --- a/MAINTAINERS.md +++ b/MAINTAINERS.md @@ -16,7 +16,7 @@ Maintainers for specific parts of the codebase: George Krajcsovits ( / @krajorama) * `storage` * `remote`: Callum Styan ( / @cstyan), Bartłomiej Płotka ( / @bwplotka), Tom Wilkie (tom.wilkie@gmail.com / @tomwilkie), Nicolás Pazos ( / @npazosmendez), Alex Greenbank ( / @alexgreenbank) - * `otlptranslator`: Arve Knudsen ( / @aknuds1), Jesús Vázquez ( / @jesusvazquez) + * `otlptranslator`: Arthur Silva Sens ( / @ArthurSens), Arve Knudsen ( / @aknuds1), Jesús Vázquez ( / @jesusvazquez) * `tsdb`: Ganesh Vernekar ( / @codesome), Bartłomiej Płotka ( / @bwplotka), Jesús Vázquez ( / @jesusvazquez) * `web` * `ui`: Julius Volz ( / @juliusv) From 4cb5f23c35a5ccfc691485d9db69aeca16d6a59f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bj=C3=B6rn=20Rabenstein?= Date: Tue, 1 Oct 2024 18:03:46 +0200 Subject: [PATCH 29/74] api: Improve doc comments for v1.MinTime and v1.MaxTime (#14986) api: Improve doc comments for v1.MinTime and v1.MaxTime While investigated something mostly unrelated, I got 
nerd-sniped by the calculation of v1.MinTime and v1.MaxTime. The seemingly magic number in there (62135596801) needed an explanation. While looking for it, I found out that the offsets used here are actually needlessly conservative. Since the timestamps are so far in the past or future, respectively, that there is no practical impact, except that the calculation is needlessly obfuscated. However, we won't change the values now to not cause any confusion for users of this code. Still, I think the doc comment should explain the circumstances so nobody gets nerd-sniped again as I did today. For the record: 62135596800 is the difference in seconds between 0001-01-01 00:00:00 (Go time zero point) and 1970-01-01 00:00:00 (Unix time zero point) in the Gregorian calendar. If "Prometheus time" were in seconds (not milliseconds), that difference would be relevant to prevent over-/underflow when converting from "Prometheus time" to "Go time". Signed-off-by: beorn7 --------- Signed-off-by: beorn7 --- web/api/v1/api.go | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/web/api/v1/api.go b/web/api/v1/api.go index 0279f727f..46666af90 100644 --- a/web/api/v1/api.go +++ b/web/api/v1/api.go @@ -834,12 +834,22 @@ func (api *API) labelValues(r *http.Request) (result apiFuncResult) { } var ( - // MinTime is the default timestamp used for the begin of optional time ranges. - // Exposed to let downstream projects to reference it. + // MinTime is the default timestamp used for the start of optional time ranges. + // Exposed to let downstream projects reference it. + // + // Historical note: This should just be time.Unix(math.MinInt64/1000, 0).UTC(), + // but it was set to a higher value in the past due to a misunderstanding. + // The value is still low enough for practical purposes, so we don't want + // to change it now, avoiding confusion for importers of this variable. 
MinTime = time.Unix(math.MinInt64/1000+62135596801, 0).UTC() // MaxTime is the default timestamp used for the end of optional time ranges. // Exposed to let downstream projects to reference it. + // + // Historical note: This should just be time.Unix(math.MaxInt64/1000, 0).UTC(), + // but it was set to a lower value in the past due to a misunderstanding. + // The value is still high enough for practical purposes, so we don't want + // to change it now, avoiding confusion for importers of this variable. MaxTime = time.Unix(math.MaxInt64/1000-62135596801, 999999999).UTC() minTimeFormatted = MinTime.Format(time.RFC3339Nano) From 06e7dd609243bd6d954372704de78986134442f1 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 1 Oct 2024 23:16:14 +0000 Subject: [PATCH 30/74] Bump github.com/prometheus/common Bumps [github.com/prometheus/common](https://github.com/prometheus/common) from 0.57.0 to 0.60.0. - [Release notes](https://github.com/prometheus/common/releases) - [Changelog](https://github.com/prometheus/common/blob/main/RELEASE.md) - [Commits](https://github.com/prometheus/common/compare/v0.57.0...v0.60.0) --- updated-dependencies: - dependency-name: github.com/prometheus/common dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- documentation/examples/remote_storage/go.mod | 14 ++++---- documentation/examples/remote_storage/go.sum | 36 ++++++++++---------- 2 files changed, 25 insertions(+), 25 deletions(-) diff --git a/documentation/examples/remote_storage/go.mod b/documentation/examples/remote_storage/go.mod index 8ed5084d9..a1be5c9b4 100644 --- a/documentation/examples/remote_storage/go.mod +++ b/documentation/examples/remote_storage/go.mod @@ -8,8 +8,8 @@ require ( github.com/gogo/protobuf v1.3.2 github.com/golang/snappy v0.0.4 github.com/influxdata/influxdb v1.11.6 - github.com/prometheus/client_golang v1.20.2 - github.com/prometheus/common v0.57.0 + github.com/prometheus/client_golang v1.20.4 + github.com/prometheus/common v0.60.0 github.com/prometheus/prometheus v0.53.1 github.com/stretchr/testify v1.9.0 ) @@ -55,11 +55,11 @@ require ( go.opentelemetry.io/otel/trace v1.27.0 // indirect go.uber.org/atomic v1.11.0 // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/crypto v0.25.0 // indirect - golang.org/x/net v0.27.0 // indirect - golang.org/x/oauth2 v0.21.0 // indirect - golang.org/x/sys v0.22.0 // indirect - golang.org/x/text v0.16.0 // indirect + golang.org/x/crypto v0.27.0 // indirect + golang.org/x/net v0.29.0 // indirect + golang.org/x/oauth2 v0.23.0 // indirect + golang.org/x/sys v0.25.0 // indirect + golang.org/x/text v0.18.0 // indirect golang.org/x/time v0.5.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 // indirect google.golang.org/grpc v1.65.0 // indirect diff --git a/documentation/examples/remote_storage/go.sum b/documentation/examples/remote_storage/go.sum index 1abeff7eb..936b448d8 100644 --- a/documentation/examples/remote_storage/go.sum +++ b/documentation/examples/remote_storage/go.sum @@ -253,8 +253,8 @@ github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXP github.com/prometheus/client_golang v1.0.0/go.mod 
h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.20.2 h1:5ctymQzZlyOON1666svgwn3s6IKWgfbjsejTMiXIyjg= -github.com/prometheus/client_golang v1.20.2/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= +github.com/prometheus/client_golang v1.20.4 h1:Tgh3Yr67PaOv/uTqloMsCEdeuFTatm5zIq5+qNN23vI= +github.com/prometheus/client_golang v1.20.4/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -264,8 +264,8 @@ github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y8 github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.57.0 h1:Ro/rKjwdq9mZn1K5QPctzh+MA4Lp0BuYk5ZZEVhoNcY= -github.com/prometheus/common v0.57.0/go.mod h1:7uRPFSUTbfZWsJ7MHY56sqt7hLQu3bxXHDnNhl8E9qI= +github.com/prometheus/common v0.60.0 h1:+V9PAREWNvJMAuJ1x1BaWl9dewMW4YrHZQbx0sJNllA= +github.com/prometheus/common v0.60.0/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw= github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4= github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod 
h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= @@ -323,8 +323,8 @@ golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30= -golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M= +golang.org/x/crypto v0.27.0 h1:GXm2NjJrPaiv/h1tb2UH8QfgC/hOf/+z0p6PT8o1w7A= +golang.org/x/crypto v0.27.0/go.mod h1:1Xngt8kV6Dvbssa53Ziq6Eqn0HqbZi5Z6R0ZpwQzt70= golang.org/x/exp v0.0.0-20240119083558-1b970713d09a h1:Q8/wZp0KX97QFTc2ywcOE0YRjZPVIx+MXInMzdvQqcA= golang.org/x/exp v0.0.0-20240119083558-1b970713d09a/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= @@ -344,20 +344,20 @@ golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.27.0 h1:5K3Njcw06/l2y9vpGCSdcxWOYHOUk3dVNGDXN+FvAys= -golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE= +golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo= +golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod 
h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs= -golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= +golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= -golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= +golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -373,17 +373,17 @@ golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI= -golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= +golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.22.0 h1:BbsgPEJULsl2fV/AT3v15Mjva5yXKQDyKf+TbDz7QJk= -golang.org/x/term v0.22.0/go.mod h1:F3qCibpT5AMpCRfhfT53vVJwhLtIVHhB9XDjfFvnMI4= +golang.org/x/term v0.24.0 h1:Mh5cbb+Zk2hqqXNO7S1iTjEphVL+jb8ZWaqh/g+JWkM= +golang.org/x/term v0.24.0/go.mod h1:lOBK/LVxemqiMij05LGJ0tzNr8xlmwBRJ81PX6wVLH8= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= -golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= +golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224= +golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= From 6c90ed3af792626ad93c12f2778806ed0998a464 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 1 Oct 2024 23:17:10 +0000 Subject: [PATCH 31/74] Bump the go-opentelemetry-io group with 9 updates Bumps the go-opentelemetry-io group with 9 updates: | Package | 
From | To | | --- | --- | --- | | [go.opentelemetry.io/collector/pdata](https://github.com/open-telemetry/opentelemetry-collector) | `1.14.1` | `1.16.0` | | [go.opentelemetry.io/collector/semconv](https://github.com/open-telemetry/opentelemetry-collector) | `0.108.1` | `0.110.0` | | [go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp](https://github.com/open-telemetry/opentelemetry-go-contrib) | `0.53.0` | `0.55.0` | | [go.opentelemetry.io/otel](https://github.com/open-telemetry/opentelemetry-go) | `1.29.0` | `1.30.0` | | [go.opentelemetry.io/otel/exporters/otlp/otlptrace](https://github.com/open-telemetry/opentelemetry-go) | `1.29.0` | `1.30.0` | | [go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc](https://github.com/open-telemetry/opentelemetry-go) | `1.29.0` | `1.30.0` | | [go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp](https://github.com/open-telemetry/opentelemetry-go) | `1.29.0` | `1.30.0` | | [go.opentelemetry.io/otel/sdk](https://github.com/open-telemetry/opentelemetry-go) | `1.29.0` | `1.30.0` | | [go.opentelemetry.io/otel/trace](https://github.com/open-telemetry/opentelemetry-go) | `1.29.0` | `1.30.0` | Updates `go.opentelemetry.io/collector/pdata` from 1.14.1 to 1.16.0 - [Release notes](https://github.com/open-telemetry/opentelemetry-collector/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-collector/blob/main/CHANGELOG-API.md) - [Commits](https://github.com/open-telemetry/opentelemetry-collector/compare/pdata/v1.14.1...pdata/v1.16.0) Updates `go.opentelemetry.io/collector/semconv` from 0.108.1 to 0.110.0 - [Release notes](https://github.com/open-telemetry/opentelemetry-collector/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-collector/blob/main/CHANGELOG-API.md) - [Commits](https://github.com/open-telemetry/opentelemetry-collector/compare/v0.108.1...v0.110.0) Updates `go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp` from 0.53.0 to 0.55.0 - [Release 
notes](https://github.com/open-telemetry/opentelemetry-go-contrib/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-go-contrib/blob/main/CHANGELOG.md) - [Commits](https://github.com/open-telemetry/opentelemetry-go-contrib/compare/zpages/v0.53.0...zpages/v0.55.0) Updates `go.opentelemetry.io/otel` from 1.29.0 to 1.30.0 - [Release notes](https://github.com/open-telemetry/opentelemetry-go/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-go/blob/main/CHANGELOG.md) - [Commits](https://github.com/open-telemetry/opentelemetry-go/compare/v1.29.0...v1.30.0) Updates `go.opentelemetry.io/otel/exporters/otlp/otlptrace` from 1.29.0 to 1.30.0 - [Release notes](https://github.com/open-telemetry/opentelemetry-go/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-go/blob/main/CHANGELOG.md) - [Commits](https://github.com/open-telemetry/opentelemetry-go/compare/v1.29.0...v1.30.0) Updates `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` from 1.29.0 to 1.30.0 - [Release notes](https://github.com/open-telemetry/opentelemetry-go/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-go/blob/main/CHANGELOG.md) - [Commits](https://github.com/open-telemetry/opentelemetry-go/compare/v1.29.0...v1.30.0) Updates `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` from 1.29.0 to 1.30.0 - [Release notes](https://github.com/open-telemetry/opentelemetry-go/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-go/blob/main/CHANGELOG.md) - [Commits](https://github.com/open-telemetry/opentelemetry-go/compare/v1.29.0...v1.30.0) Updates `go.opentelemetry.io/otel/sdk` from 1.29.0 to 1.30.0 - [Release notes](https://github.com/open-telemetry/opentelemetry-go/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-go/blob/main/CHANGELOG.md) - [Commits](https://github.com/open-telemetry/opentelemetry-go/compare/v1.29.0...v1.30.0) Updates 
`go.opentelemetry.io/otel/trace` from 1.29.0 to 1.30.0 - [Release notes](https://github.com/open-telemetry/opentelemetry-go/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-go/blob/main/CHANGELOG.md) - [Commits](https://github.com/open-telemetry/opentelemetry-go/compare/v1.29.0...v1.30.0) --- updated-dependencies: - dependency-name: go.opentelemetry.io/collector/pdata dependency-type: direct:production update-type: version-update:semver-minor dependency-group: go-opentelemetry-io - dependency-name: go.opentelemetry.io/collector/semconv dependency-type: direct:production update-type: version-update:semver-minor dependency-group: go-opentelemetry-io - dependency-name: go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp dependency-type: direct:production update-type: version-update:semver-minor dependency-group: go-opentelemetry-io - dependency-name: go.opentelemetry.io/otel dependency-type: direct:production update-type: version-update:semver-minor dependency-group: go-opentelemetry-io - dependency-name: go.opentelemetry.io/otel/exporters/otlp/otlptrace dependency-type: direct:production update-type: version-update:semver-minor dependency-group: go-opentelemetry-io - dependency-name: go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc dependency-type: direct:production update-type: version-update:semver-minor dependency-group: go-opentelemetry-io - dependency-name: go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp dependency-type: direct:production update-type: version-update:semver-minor dependency-group: go-opentelemetry-io - dependency-name: go.opentelemetry.io/otel/sdk dependency-type: direct:production update-type: version-update:semver-minor dependency-group: go-opentelemetry-io - dependency-name: go.opentelemetry.io/otel/trace dependency-type: direct:production update-type: version-update:semver-minor dependency-group: go-opentelemetry-io ... 
Signed-off-by: dependabot[bot] --- go.mod | 30 ++++++++++++++--------------- go.sum | 60 +++++++++++++++++++++++++++++----------------------------- 2 files changed, 45 insertions(+), 45 deletions(-) diff --git a/go.mod b/go.mod index c3f6bbe74..4d0c98719 100644 --- a/go.mod +++ b/go.mod @@ -62,15 +62,15 @@ require ( github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c github.com/stretchr/testify v1.9.0 github.com/vultr/govultr/v2 v2.17.2 - go.opentelemetry.io/collector/pdata v1.14.1 - go.opentelemetry.io/collector/semconv v0.108.1 - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 - go.opentelemetry.io/otel v1.29.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.29.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.29.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.29.0 - go.opentelemetry.io/otel/sdk v1.29.0 - go.opentelemetry.io/otel/trace v1.29.0 + go.opentelemetry.io/collector/pdata v1.16.0 + go.opentelemetry.io/collector/semconv v0.110.0 + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0 + go.opentelemetry.io/otel v1.30.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.30.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.30.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.30.0 + go.opentelemetry.io/otel/sdk v1.30.0 + go.opentelemetry.io/otel/trace v1.30.0 go.uber.org/atomic v1.11.0 go.uber.org/automaxprocs v1.5.3 go.uber.org/goleak v1.3.0 @@ -82,8 +82,8 @@ require ( golang.org/x/time v0.6.0 golang.org/x/tools v0.24.0 google.golang.org/api v0.195.0 - google.golang.org/genproto/googleapis/api v0.0.0-20240827150818-7e3bb234dfed - google.golang.org/grpc v1.66.0 + google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 + google.golang.org/grpc v1.66.2 google.golang.org/protobuf v1.34.2 gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.1 @@ -188,13 +188,13 @@ require ( 
github.com/xhit/go-str2duration/v2 v2.1.0 // indirect go.mongodb.org/mongo-driver v1.14.0 // indirect go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/otel/metric v1.29.0 // indirect + go.opentelemetry.io/otel/metric v1.30.0 // indirect go.opentelemetry.io/proto/otlp v1.3.1 // indirect - golang.org/x/crypto v0.26.0 // indirect + golang.org/x/crypto v0.27.0 // indirect golang.org/x/exp v0.0.0-20240119083558-1b970713d09a // indirect golang.org/x/mod v0.20.0 // indirect - golang.org/x/net v0.28.0 // indirect - golang.org/x/term v0.23.0 // indirect + golang.org/x/net v0.29.0 // indirect + golang.org/x/term v0.24.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect diff --git a/go.sum b/go.sum index 6f31bec93..73dafaa10 100644 --- a/go.sum +++ b/go.sum @@ -732,26 +732,26 @@ go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/collector/pdata v1.14.1 h1:wXZjtQA7Vy5HFqco+yA95ENyMQU5heBB1IxMHQf6mUk= -go.opentelemetry.io/collector/pdata v1.14.1/go.mod h1:z1dTjwwtcoXxZx2/nkHysjxMeaxe9pEmYTEr4SMNIx8= -go.opentelemetry.io/collector/semconv v0.108.1 h1:Txk9tauUnamZaxS5vlf1O0uZ4VD6nioRBR0nX8L/fU4= -go.opentelemetry.io/collector/semconv v0.108.1/go.mod h1:zCJ5njhWpejR+A40kiEoeFm1xq1uzyZwMnRNX6/D82A= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg= -go.opentelemetry.io/otel v1.29.0 h1:PdomN/Al4q/lN6iBJEN3AwPvUiHPMlt93c8bqTG5Llw= -go.opentelemetry.io/otel v1.29.0/go.mod 
h1:N/WtXPs1CNCUEx+Agz5uouwCba+i+bJGFicT8SR4NP8= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.29.0 h1:dIIDULZJpgdiHz5tXrTgKIMLkus6jEFa7x5SOKcyR7E= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.29.0/go.mod h1:jlRVBe7+Z1wyxFSUs48L6OBQZ5JwH2Hg/Vbl+t9rAgI= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.29.0 h1:nSiV3s7wiCam610XcLbYOmMfJxB9gO4uK3Xgv5gmTgg= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.29.0/go.mod h1:hKn/e/Nmd19/x1gvIHwtOwVWM+VhuITSWip3JUDghj0= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.29.0 h1:JAv0Jwtl01UFiyWZEMiJZBiTlv5A50zNs8lsthXqIio= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.29.0/go.mod h1:QNKLmUEAq2QUbPQUfvw4fmv0bgbK7UlOSFCnXyfvSNc= -go.opentelemetry.io/otel/metric v1.29.0 h1:vPf/HFWTNkPu1aYeIsc98l4ktOQaL6LeSoeV2g+8YLc= -go.opentelemetry.io/otel/metric v1.29.0/go.mod h1:auu/QWieFVWx+DmQOUMgj0F8LHWdgalxXqvp7BII/W8= -go.opentelemetry.io/otel/sdk v1.29.0 h1:vkqKjk7gwhS8VaWb0POZKmIEDimRCMsopNYnriHyryo= -go.opentelemetry.io/otel/sdk v1.29.0/go.mod h1:pM8Dx5WKnvxLCb+8lG1PRNIDxu9g9b9g59Qr7hfAAok= -go.opentelemetry.io/otel/trace v1.29.0 h1:J/8ZNK4XgR7a21DZUAsbF8pZ5Jcw1VhACmnYt39JTi4= -go.opentelemetry.io/otel/trace v1.29.0/go.mod h1:eHl3w0sp3paPkYstJOmAimxhiFXPg+MMTlEh3nsQgWQ= +go.opentelemetry.io/collector/pdata v1.16.0 h1:g02K8jlRnmQ7TQDuXpdgVL6vIxIVqr5Gbb1qIR27rto= +go.opentelemetry.io/collector/pdata v1.16.0/go.mod h1:YZZJIt2ehxosYf/Y1pbvexjNWsIGNNrzzlCTO9jC1F4= +go.opentelemetry.io/collector/semconv v0.110.0 h1:KHQnOHe3gUz0zsxe8ph9kN5OTypCFD4V+06AiBTfeNk= +go.opentelemetry.io/collector/semconv v0.110.0/go.mod h1:zCJ5njhWpejR+A40kiEoeFm1xq1uzyZwMnRNX6/D82A= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0 h1:ZIg3ZT/aQ7AfKqdwp7ECpOK6vHqquXXuyTjIO8ZdmPs= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0/go.mod h1:DQAwmETtZV00skUwgD6+0U89g80NKsJE3DCKeLLPQMI= +go.opentelemetry.io/otel v1.30.0 
h1:F2t8sK4qf1fAmY9ua4ohFS/K+FUuOPemHUIXHtktrts= +go.opentelemetry.io/otel v1.30.0/go.mod h1:tFw4Br9b7fOS+uEao81PJjVMjW/5fvNCbpsDIXqP0pc= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.30.0 h1:lsInsfvhVIfOI6qHVyysXMNDnjO9Npvl7tlDPJFBVd4= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.30.0/go.mod h1:KQsVNh4OjgjTG0G6EiNi1jVpnaeeKsKMRwbLN+f1+8M= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.30.0 h1:m0yTiGDLUvVYaTFbAvCkVYIYcvwKt3G7OLoN77NUs/8= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.30.0/go.mod h1:wBQbT4UekBfegL2nx0Xk1vBcnzyBPsIVm9hRG4fYcr4= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.30.0 h1:umZgi92IyxfXd/l4kaDhnKgY8rnN/cZcF1LKc6I8OQ8= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.30.0/go.mod h1:4lVs6obhSVRb1EW5FhOuBTyiQhtRtAnnva9vD3yRfq8= +go.opentelemetry.io/otel/metric v1.30.0 h1:4xNulvn9gjzo4hjg+wzIKG7iNFEaBMX00Qd4QIZs7+w= +go.opentelemetry.io/otel/metric v1.30.0/go.mod h1:aXTfST94tswhWEb+5QjlSqG+cZlmyXy/u8jFpor3WqQ= +go.opentelemetry.io/otel/sdk v1.30.0 h1:cHdik6irO49R5IysVhdn8oaiR9m8XluDaJAs4DfOrYE= +go.opentelemetry.io/otel/sdk v1.30.0/go.mod h1:p14X4Ok8S+sygzblytT1nqG98QG2KYKv++HE0LY/mhg= +go.opentelemetry.io/otel/trace v1.30.0 h1:7UBkkYzeg3C7kQX8VAidWh2biiQbtAKjyIML8dQ9wmc= +go.opentelemetry.io/otel/trace v1.30.0/go.mod h1:5EyKqTzzmyqB9bwtCCq6pDLktPK6fmGf/Dph+8VI02o= go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= @@ -782,8 +782,8 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= golang.org/x/crypto v0.23.0/go.mod 
h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= -golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw= -golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54= +golang.org/x/crypto v0.27.0 h1:GXm2NjJrPaiv/h1tb2UH8QfgC/hOf/+z0p6PT8o1w7A= +golang.org/x/crypto v0.27.0/go.mod h1:1Xngt8kV6Dvbssa53Ziq6Eqn0HqbZi5Z6R0ZpwQzt70= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -865,8 +865,8 @@ golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= -golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE= -golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= +golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo= +golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -963,8 +963,8 @@ golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/term v0.20.0/go.mod 
h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= -golang.org/x/term v0.23.0 h1:F6D4vR+EHoL9/sWAWgAR1H2DcHr4PareCbAaCo1RpuU= -golang.org/x/term v0.23.0/go.mod h1:DgV24QBUrK6jhZXl+20l6UWznPlwAHm1Q1mGHtydmSk= +golang.org/x/term v0.24.0 h1:Mh5cbb+Zk2hqqXNO7S1iTjEphVL+jb8ZWaqh/g+JWkM= +golang.org/x/term v0.24.0/go.mod h1:lOBK/LVxemqiMij05LGJ0tzNr8xlmwBRJ81PX6wVLH8= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1094,8 +1094,8 @@ google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1m google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto/googleapis/api v0.0.0-20240827150818-7e3bb234dfed h1:3RgNmBoI9MZhsj3QxC+AP/qQhNwpCLOvYDYYsFrhFt0= -google.golang.org/genproto/googleapis/api v0.0.0-20240827150818-7e3bb234dfed/go.mod h1:OCdP9MfskevB/rbYvHTsXTtKC+3bHWajPdoKgjcYkfo= +google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 h1:hjSy6tcFQZ171igDaN5QHOw2n6vx40juYbC/x67CEhc= +google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:qpvKtACPCQhAdu3PyQgV4l3LMXZEtft7y8QcarRsp9I= google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 h1:pPJltXNxVzT4pK9yD8vR9X75DaWYYmLGMsEvBfFQZzQ= google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= @@ -1116,8 +1116,8 @@ 
google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3Iji google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.66.0 h1:DibZuoBznOxbDQxRINckZcUvnCEvrW9pcWIE2yF9r1c= -google.golang.org/grpc v1.66.0/go.mod h1:s3/l6xSSCURdVfAnL+TqCNMyTDAGN6+lZeVxnZR128Y= +google.golang.org/grpc v1.66.2 h1:3QdXkuq3Bkh7w+ywLdLvM56cmGvQHUMZpiCzt6Rqaoo= +google.golang.org/grpc v1.66.2/go.mod h1:s3/l6xSSCURdVfAnL+TqCNMyTDAGN6+lZeVxnZR128Y= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= From bb0382fbafdeef9b57f0b9693199bb092906defb Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 1 Oct 2024 23:17:56 +0000 Subject: [PATCH 32/74] Bump vitest from 2.0.5 to 2.1.1 in /web/ui Bumps [vitest](https://github.com/vitest-dev/vitest/tree/HEAD/packages/vitest) from 2.0.5 to 2.1.1. - [Release notes](https://github.com/vitest-dev/vitest/releases) - [Commits](https://github.com/vitest-dev/vitest/commits/v2.1.1/packages/vitest) --- updated-dependencies: - dependency-name: vitest dependency-type: direct:development update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- web/ui/mantine-ui/package.json | 2 +- web/ui/package-lock.json | 305 ++++++++++----------------------- 2 files changed, 87 insertions(+), 220 deletions(-) diff --git a/web/ui/mantine-ui/package.json b/web/ui/mantine-ui/package.json index aae8ba99b..012d8bc4d 100644 --- a/web/ui/mantine-ui/package.json +++ b/web/ui/mantine-ui/package.json @@ -68,6 +68,6 @@ "postcss-preset-mantine": "^1.17.0", "postcss-simple-vars": "^7.0.1", "vite": "^5.1.0", - "vitest": "^2.0.5" + "vitest": "^2.1.1" } } diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index 49a907480..7f13f2810 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -82,7 +82,7 @@ "postcss-preset-mantine": "^1.17.0", "postcss-simple-vars": "^7.0.1", "vite": "^5.1.0", - "vitest": "^2.0.5" + "vitest": "^2.1.1" } }, "mantine-ui/node_modules/eslint": { @@ -3385,14 +3385,13 @@ } }, "node_modules/@vitest/expect": { - "version": "2.0.5", - "resolved": "https://registry.npmjs.org/@vitest/expect/-/expect-2.0.5.tgz", - "integrity": "sha512-yHZtwuP7JZivj65Gxoi8upUN2OzHTi3zVfjwdpu2WrvCZPLwsJ2Ey5ILIPccoW23dd/zQBlJ4/dhi7DWNyXCpA==", + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/@vitest/expect/-/expect-2.1.1.tgz", + "integrity": "sha512-YeueunS0HiHiQxk+KEOnq/QMzlUuOzbU1Go+PgAsHvvv3tUkJPm9xWt+6ITNTlzsMXUjmgm5T+U7KBPK2qQV6w==", "dev": true, - "license": "MIT", "dependencies": { - "@vitest/spy": "2.0.5", - "@vitest/utils": "2.0.5", + "@vitest/spy": "2.1.1", + "@vitest/utils": "2.1.1", "chai": "^5.1.1", "tinyrainbow": "^1.2.0" }, @@ -3400,12 +3399,38 @@ "url": "https://opencollective.com/vitest" } }, - "node_modules/@vitest/pretty-format": { - "version": "2.0.5", - "resolved": "https://registry.npmjs.org/@vitest/pretty-format/-/pretty-format-2.0.5.tgz", - "integrity": "sha512-h8k+1oWHfwTkyTkb9egzwNMfJAEx4veaPSnMeKbVSjp4euqGSbQlm5+6VHwTr7u4FJslVVsUG5nopCaAYdOmSQ==", + "node_modules/@vitest/mocker": { + "version": "2.1.1", + "resolved": 
"https://registry.npmjs.org/@vitest/mocker/-/mocker-2.1.1.tgz", + "integrity": "sha512-LNN5VwOEdJqCmJ/2XJBywB11DLlkbY0ooDJW3uRX5cZyYCrc4PI/ePX0iQhE3BiEGiQmK4GE7Q/PqCkkaiPnrA==", + "dev": true, + "dependencies": { + "@vitest/spy": "^2.1.0-beta.1", + "estree-walker": "^3.0.3", + "magic-string": "^0.30.11" + }, + "funding": { + "url": "https://opencollective.com/vitest" + }, + "peerDependencies": { + "@vitest/spy": "2.1.1", + "msw": "^2.3.5", + "vite": "^5.0.0" + }, + "peerDependenciesMeta": { + "msw": { + "optional": true + }, + "vite": { + "optional": true + } + } + }, + "node_modules/@vitest/pretty-format": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/@vitest/pretty-format/-/pretty-format-2.1.1.tgz", + "integrity": "sha512-SjxPFOtuINDUW8/UkElJYQSFtnWX7tMksSGW0vfjxMneFqxVr8YJ979QpMbDW7g+BIiq88RAGDjf7en6rvLPPQ==", "dev": true, - "license": "MIT", "dependencies": { "tinyrainbow": "^1.2.0" }, @@ -3414,13 +3439,12 @@ } }, "node_modules/@vitest/runner": { - "version": "2.0.5", - "resolved": "https://registry.npmjs.org/@vitest/runner/-/runner-2.0.5.tgz", - "integrity": "sha512-TfRfZa6Bkk9ky4tW0z20WKXFEwwvWhRY+84CnSEtq4+3ZvDlJyY32oNTJtM7AW9ihW90tX/1Q78cb6FjoAs+ig==", + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/@vitest/runner/-/runner-2.1.1.tgz", + "integrity": "sha512-uTPuY6PWOYitIkLPidaY5L3t0JJITdGTSwBtwMjKzo5O6RCOEncz9PUN+0pDidX8kTHYjO0EwUIvhlGpnGpxmA==", "dev": true, - "license": "MIT", "dependencies": { - "@vitest/utils": "2.0.5", + "@vitest/utils": "2.1.1", "pathe": "^1.1.2" }, "funding": { @@ -3428,14 +3452,13 @@ } }, "node_modules/@vitest/snapshot": { - "version": "2.0.5", - "resolved": "https://registry.npmjs.org/@vitest/snapshot/-/snapshot-2.0.5.tgz", - "integrity": "sha512-SgCPUeDFLaM0mIUHfaArq8fD2WbaXG/zVXjRupthYfYGzc8ztbFbu6dUNOblBG7XLMR1kEhS/DNnfCZ2IhdDew==", + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/@vitest/snapshot/-/snapshot-2.1.1.tgz", + "integrity": 
"sha512-BnSku1WFy7r4mm96ha2FzN99AZJgpZOWrAhtQfoxjUU5YMRpq1zmHRq7a5K9/NjqonebO7iVDla+VvZS8BOWMw==", "dev": true, - "license": "MIT", "dependencies": { - "@vitest/pretty-format": "2.0.5", - "magic-string": "^0.30.10", + "@vitest/pretty-format": "2.1.1", + "magic-string": "^0.30.11", "pathe": "^1.1.2" }, "funding": { @@ -3443,11 +3466,10 @@ } }, "node_modules/@vitest/spy": { - "version": "2.0.5", - "resolved": "https://registry.npmjs.org/@vitest/spy/-/spy-2.0.5.tgz", - "integrity": "sha512-c/jdthAhvJdpfVuaexSrnawxZz6pywlTPe84LUB2m/4t3rl2fTo9NFGBG4oWgaD+FTgDDV8hJ/nibT7IfH3JfA==", + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/@vitest/spy/-/spy-2.1.1.tgz", + "integrity": "sha512-ZM39BnZ9t/xZ/nF4UwRH5il0Sw93QnZXd9NAZGRpIgj0yvVwPpLd702s/Cx955rGaMlyBQkZJ2Ir7qyY48VZ+g==", "dev": true, - "license": "MIT", "dependencies": { "tinyspy": "^3.0.0" }, @@ -3456,14 +3478,12 @@ } }, "node_modules/@vitest/utils": { - "version": "2.0.5", - "resolved": "https://registry.npmjs.org/@vitest/utils/-/utils-2.0.5.tgz", - "integrity": "sha512-d8HKbqIcya+GR67mkZbrzhS5kKhtp8dQLcmRZLGTscGVg7yImT82cIrhtn2L8+VujWcy6KZweApgNmPsTAO/UQ==", + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/@vitest/utils/-/utils-2.1.1.tgz", + "integrity": "sha512-Y6Q9TsI+qJ2CC0ZKj6VBb+T8UPz593N113nnUykqwANqhgf3QkZeHFlusgKLTqrnVHbj/XDKZcDHol+dxVT+rQ==", "dev": true, - "license": "MIT", "dependencies": { - "@vitest/pretty-format": "2.0.5", - "estree-walker": "^3.0.3", + "@vitest/pretty-format": "2.1.1", "loupe": "^3.1.1", "tinyrainbow": "^1.2.0" }, @@ -3625,7 +3645,6 @@ "resolved": "https://registry.npmjs.org/assertion-error/-/assertion-error-2.0.1.tgz", "integrity": "sha512-Izi8RQcffqCeNVgFigKli1ssklIbpHnCYc6AknXGYoB6grJqyeby7jv12JUQgmTAnIDnbck1uxksT4dzN3PWBA==", "dev": true, - "license": "MIT", "engines": { "node": ">=12" } @@ -3891,7 +3910,6 @@ "resolved": "https://registry.npmjs.org/cac/-/cac-6.7.14.tgz", "integrity": 
"sha512-b6Ilus+c3RrdDk+JhLKUAQfzzgLEPy6wcXqS7f/xe1EETvsDP6GORG7SFuOs6cID5YkqchW/LXZbX5bc8j7ZcQ==", "dev": true, - "license": "MIT", "engines": { "node": ">=8" } @@ -3953,7 +3971,6 @@ "resolved": "https://registry.npmjs.org/chai/-/chai-5.1.1.tgz", "integrity": "sha512-pT1ZgP8rPNqUgieVaEY+ryQr6Q4HXNg8Ei9UnLUrjN4IA7dvQC5JB+/kxVcPNDHyBcc/26CXPkbNzq3qwrOEKA==", "dev": true, - "license": "MIT", "dependencies": { "assertion-error": "^2.0.1", "check-error": "^2.1.1", @@ -3997,7 +4014,6 @@ "resolved": "https://registry.npmjs.org/check-error/-/check-error-2.1.1.tgz", "integrity": "sha512-OAlb+T7V4Op9OwdkjmguYRqncdlx5JiofwOAUkmTF+jNdHwzTaTs4sRAGpzLF3oOz5xAyDGrPgeIDFQmDOTiJw==", "dev": true, - "license": "MIT", "engines": { "node": ">= 16" } @@ -4286,7 +4302,6 @@ "resolved": "https://registry.npmjs.org/deep-eql/-/deep-eql-5.0.2.tgz", "integrity": "sha512-h5k/5U50IJJFpzfL6nO9jaaumfjO/f2NjK/oYB2Djzm4p9L+3T9qWpZqZ2hAbLPuuYq9wrU08WQyBTL5GbPk5Q==", "dev": true, - "license": "MIT", "engines": { "node": ">=6" } @@ -4957,7 +4972,6 @@ "resolved": "https://registry.npmjs.org/estree-walker/-/estree-walker-3.0.3.tgz", "integrity": "sha512-7RUKfXgSMMkzt6ZuXmqapOurLGPPfgj6l9uRZ7lRGolvk0y2yocc35LdcxKC5PQZdn2DMqioAQ2NoWcrTKmm6g==", "dev": true, - "license": "MIT", "dependencies": { "@types/estree": "^1.0.0" } @@ -5274,7 +5288,6 @@ "resolved": "https://registry.npmjs.org/get-func-name/-/get-func-name-2.0.2.tgz", "integrity": "sha512-8vXOvuE167CtIc3OyItco7N/dpRtBbYOsPsXCz7X/PMnlGjYjSGuZJgM1Y7mmew7BKf9BqvLX2tnOVy1BBUsxQ==", "dev": true, - "license": "MIT", "engines": { "node": "*" } @@ -7050,7 +7063,6 @@ "resolved": "https://registry.npmjs.org/loupe/-/loupe-3.1.1.tgz", "integrity": "sha512-edNu/8D5MKVfGVFRhFf8aAxiTM6Wumfz5XsaatSxlD3w4R1d/WEKUTydCdPGbl9K7QG/Ca3GnDV2sIKIpXRQcw==", "dev": true, - "license": "MIT", "dependencies": { "get-func-name": "^2.0.1" } @@ -7078,7 +7090,6 @@ "resolved": "https://registry.npmjs.org/magic-string/-/magic-string-0.30.11.tgz", "integrity": 
"sha512-+Wri9p0QHMy+545hKww7YAu5NyzF8iomPL/RQazugQ9+Ez4Ic3mERMd8ZTX5rfK944j+560ZJi8iAwgak1Ac7A==", "dev": true, - "license": "MIT", "dependencies": { "@jridgewell/sourcemap-codec": "^1.5.0" } @@ -7123,7 +7134,8 @@ "resolved": "https://registry.npmjs.org/merge-stream/-/merge-stream-2.0.0.tgz", "integrity": "sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w==", "dev": true, - "license": "MIT" + "license": "MIT", + "peer": true }, "node_modules/merge2": { "version": "1.4.1", @@ -7547,15 +7559,13 @@ "version": "1.1.2", "resolved": "https://registry.npmjs.org/pathe/-/pathe-1.1.2.tgz", "integrity": "sha512-whLdWMYL2TwI08hn8/ZqAbrVemu0LNaNNJZX73O6qaIdCTfXutsLhMkjdENX0qhsQ9uIimo4/aQOmXkoon2nDQ==", - "dev": true, - "license": "MIT" + "dev": true }, "node_modules/pathval": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/pathval/-/pathval-2.0.0.tgz", "integrity": "sha512-vE7JKRyES09KiunauX7nd2Q9/L7lhok4smP9RZTDeD4MVs72Dp2qNFVz39Nz5a0FVEW0BJR6C0DYrq6unoziZA==", "dev": true, - "license": "MIT", "engines": { "node": ">= 14.16" } @@ -8821,6 +8831,12 @@ "dev": true, "license": "MIT" }, + "node_modules/tinyexec": { + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/tinyexec/-/tinyexec-0.3.0.tgz", + "integrity": "sha512-tVGE0mVJPGb0chKhqmsoosjsS+qUnJVGJpZgsHYQcGoPlG3B51R3PouqTgEGH2Dc9jjFyOqOpix6ZHNMXp1FZg==", + "dev": true + }, "node_modules/tinypool": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/tinypool/-/tinypool-1.0.1.tgz", @@ -8836,17 +8852,15 @@ "resolved": "https://registry.npmjs.org/tinyrainbow/-/tinyrainbow-1.2.0.tgz", "integrity": "sha512-weEDEq7Z5eTHPDh4xjX789+fHfF+P8boiFB+0vbWzpbnbsEr/GRaohi/uMKxg8RZMXnl1ItAi/IUHWMsjDV7kQ==", "dev": true, - "license": "MIT", "engines": { "node": ">=14.0.0" } }, "node_modules/tinyspy": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/tinyspy/-/tinyspy-3.0.0.tgz", - "integrity": 
"sha512-q5nmENpTHgiPVd1cJDDc9cVoYN5x4vCvwT3FMilvKPKneCBZAxn2YWQjDF0UMcE9k0Cay1gBiDfTMU0g+mPMQA==", + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/tinyspy/-/tinyspy-3.0.2.tgz", + "integrity": "sha512-n1cw8k1k0x4pgA2+9XrOkFydTerNcJ1zWCO5Nn9scWHTD+5tp8dghT2x1uduQePZTZgd3Tupf+x9BxJjeJi77Q==", "dev": true, - "license": "MIT", "engines": { "node": ">=14.0.0" } @@ -9315,16 +9329,14 @@ } }, "node_modules/vite-node": { - "version": "2.0.5", - "resolved": "https://registry.npmjs.org/vite-node/-/vite-node-2.0.5.tgz", - "integrity": "sha512-LdsW4pxj0Ot69FAoXZ1yTnA9bjGohr2yNBU7QKRxpz8ITSkhuDl6h3zS/tvgz4qrNjeRnvrWeXQ8ZF7Um4W00Q==", + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/vite-node/-/vite-node-2.1.1.tgz", + "integrity": "sha512-N/mGckI1suG/5wQI35XeR9rsMsPqKXzq1CdUndzVstBj/HvyxxGctwnK6WX43NGt5L3Z5tcRf83g4TITKJhPrA==", "dev": true, - "license": "MIT", "dependencies": { "cac": "^6.7.14", - "debug": "^4.3.5", + "debug": "^4.3.6", "pathe": "^1.1.2", - "tinyrainbow": "^1.2.0", "vite": "^5.0.0" }, "bin": { @@ -9338,30 +9350,29 @@ } }, "node_modules/vitest": { - "version": "2.0.5", - "resolved": "https://registry.npmjs.org/vitest/-/vitest-2.0.5.tgz", - "integrity": "sha512-8GUxONfauuIdeSl5f9GTgVEpg5BTOlplET4WEDaeY2QBiN8wSm68vxN/tb5z405OwppfoCavnwXafiaYBC/xOA==", + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/vitest/-/vitest-2.1.1.tgz", + "integrity": "sha512-97We7/VC0e9X5zBVkvt7SGQMGrRtn3KtySFQG5fpaMlS+l62eeXRQO633AYhSTC3z7IMebnPPNjGXVGNRFlxBA==", "dev": true, - "license": "MIT", "dependencies": { - "@ampproject/remapping": "^2.3.0", - "@vitest/expect": "2.0.5", - "@vitest/pretty-format": "^2.0.5", - "@vitest/runner": "2.0.5", - "@vitest/snapshot": "2.0.5", - "@vitest/spy": "2.0.5", - "@vitest/utils": "2.0.5", + "@vitest/expect": "2.1.1", + "@vitest/mocker": "2.1.1", + "@vitest/pretty-format": "^2.1.1", + "@vitest/runner": "2.1.1", + "@vitest/snapshot": "2.1.1", + "@vitest/spy": "2.1.1", + "@vitest/utils": "2.1.1", "chai": 
"^5.1.1", - "debug": "^4.3.5", - "execa": "^8.0.1", - "magic-string": "^0.30.10", + "debug": "^4.3.6", + "magic-string": "^0.30.11", "pathe": "^1.1.2", "std-env": "^3.7.0", - "tinybench": "^2.8.0", + "tinybench": "^2.9.0", + "tinyexec": "^0.3.0", "tinypool": "^1.0.0", "tinyrainbow": "^1.2.0", "vite": "^5.0.0", - "vite-node": "2.0.5", + "vite-node": "2.1.1", "why-is-node-running": "^2.3.0" }, "bin": { @@ -9376,8 +9387,8 @@ "peerDependencies": { "@edge-runtime/vm": "*", "@types/node": "^18.0.0 || >=20.0.0", - "@vitest/browser": "2.0.5", - "@vitest/ui": "2.0.5", + "@vitest/browser": "2.1.1", + "@vitest/ui": "2.1.1", "happy-dom": "*", "jsdom": "*" }, @@ -9402,150 +9413,6 @@ } } }, - "node_modules/vitest/node_modules/execa": { - "version": "8.0.1", - "resolved": "https://registry.npmjs.org/execa/-/execa-8.0.1.tgz", - "integrity": "sha512-VyhnebXciFV2DESc+p6B+y0LjSm0krU4OgJN44qFAhBY0TJ+1V61tYD2+wHusZ6F9n5K+vl8k0sTy7PEfV4qpg==", - "dev": true, - "license": "MIT", - "dependencies": { - "cross-spawn": "^7.0.3", - "get-stream": "^8.0.1", - "human-signals": "^5.0.0", - "is-stream": "^3.0.0", - "merge-stream": "^2.0.0", - "npm-run-path": "^5.1.0", - "onetime": "^6.0.0", - "signal-exit": "^4.1.0", - "strip-final-newline": "^3.0.0" - }, - "engines": { - "node": ">=16.17" - }, - "funding": { - "url": "https://github.com/sindresorhus/execa?sponsor=1" - } - }, - "node_modules/vitest/node_modules/get-stream": { - "version": "8.0.1", - "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-8.0.1.tgz", - "integrity": "sha512-VaUJspBffn/LMCJVoMvSAdmscJyS1auj5Zulnn5UoYcY531UWmdwhRWkcGKnGU93m5HSXP9LP2usOryrBtQowA==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=16" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/vitest/node_modules/human-signals": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/human-signals/-/human-signals-5.0.0.tgz", - "integrity": 
"sha512-AXcZb6vzzrFAUE61HnN4mpLqd/cSIwNQjtNWR0euPm6y0iqx3G4gOXaIDdtdDwZmhwe82LA6+zinmW4UBWVePQ==", - "dev": true, - "license": "Apache-2.0", - "engines": { - "node": ">=16.17.0" - } - }, - "node_modules/vitest/node_modules/is-stream": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-3.0.0.tgz", - "integrity": "sha512-LnQR4bZ9IADDRSkvpqMGvt/tEJWclzklNgSw48V5EAaAeDd6qGvN8ei6k5p0tvxSR171VmGyHuTiAOfxAbr8kA==", - "dev": true, - "license": "MIT", - "engines": { - "node": "^12.20.0 || ^14.13.1 || >=16.0.0" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/vitest/node_modules/mimic-fn": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-4.0.0.tgz", - "integrity": "sha512-vqiC06CuhBTUdZH+RYl8sFrL096vA45Ok5ISO6sE/Mr1jRbGH4Csnhi8f3wKVl7x8mO4Au7Ir9D3Oyv1VYMFJw==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/vitest/node_modules/npm-run-path": { - "version": "5.3.0", - "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-5.3.0.tgz", - "integrity": "sha512-ppwTtiJZq0O/ai0z7yfudtBpWIoxM8yE6nHi1X47eFR2EWORqfbu6CnPlNsjeN683eT0qG6H/Pyf9fCcvjnnnQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "path-key": "^4.0.0" - }, - "engines": { - "node": "^12.20.0 || ^14.13.1 || >=16.0.0" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/vitest/node_modules/onetime": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/onetime/-/onetime-6.0.0.tgz", - "integrity": "sha512-1FlR+gjXK7X+AsAHso35MnyN5KqGwJRi/31ft6x0M194ht7S+rWAvd7PHss9xSKMzE0asv1pyIHaJYq+BbacAQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "mimic-fn": "^4.0.0" - }, - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - 
"node_modules/vitest/node_modules/path-key": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/path-key/-/path-key-4.0.0.tgz", - "integrity": "sha512-haREypq7xkM7ErfgIyA0z+Bj4AGKlMSdlQE2jvJo6huWD1EdkKYV+G/T4nq0YEF2vgTT8kqMFKo1uHn950r4SQ==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/vitest/node_modules/signal-exit": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-4.1.0.tgz", - "integrity": "sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw==", - "dev": true, - "license": "ISC", - "engines": { - "node": ">=14" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - } - }, - "node_modules/vitest/node_modules/strip-final-newline": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/strip-final-newline/-/strip-final-newline-3.0.0.tgz", - "integrity": "sha512-dOESqjYr96iWYylGObzd39EuNTa5VJxyvVAEm5Jnh7KGo75V43Hk1odPQkNDyXNmUR6k+gEiDVXnjB8HJ3crXw==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, "node_modules/w3c-keyname": { "version": "2.2.8", "resolved": "https://registry.npmjs.org/w3c-keyname/-/w3c-keyname-2.2.8.tgz", From a425dbfa72490c599ea235a0ef2278e56b40c005 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 1 Oct 2024 23:18:11 +0000 Subject: [PATCH 33/74] Bump react-router-dom from 6.26.1 to 6.26.2 in /web/ui Bumps [react-router-dom](https://github.com/remix-run/react-router/tree/HEAD/packages/react-router-dom) from 6.26.1 to 6.26.2. 
- [Release notes](https://github.com/remix-run/react-router/releases) - [Changelog](https://github.com/remix-run/react-router/blob/main/packages/react-router-dom/CHANGELOG.md) - [Commits](https://github.com/remix-run/react-router/commits/react-router-dom@6.26.2/packages/react-router-dom) --- updated-dependencies: - dependency-name: react-router-dom dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- web/ui/mantine-ui/package.json | 2 +- web/ui/package-lock.json | 29 +++++++++++++---------------- 2 files changed, 14 insertions(+), 17 deletions(-) diff --git a/web/ui/mantine-ui/package.json b/web/ui/mantine-ui/package.json index aae8ba99b..729da16a6 100644 --- a/web/ui/mantine-ui/package.json +++ b/web/ui/mantine-ui/package.json @@ -44,7 +44,7 @@ "react-dom": "^18.3.1", "react-infinite-scroll-component": "^6.1.0", "react-redux": "^9.1.2", - "react-router-dom": "^6.26.1", + "react-router-dom": "^6.26.2", "sanitize-html": "^2.13.0", "uplot": "^1.6.30", "uplot-react": "^1.2.2", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index 49a907480..78eaaeb04 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -58,7 +58,7 @@ "react-dom": "^18.3.1", "react-infinite-scroll-component": "^6.1.0", "react-redux": "^9.1.2", - "react-router-dom": "^6.26.1", + "react-router-dom": "^6.26.2", "sanitize-html": "^2.13.0", "uplot": "^1.6.30", "uplot-react": "^1.2.2", @@ -2364,10 +2364,9 @@ } }, "node_modules/@remix-run/router": { - "version": "1.19.1", - "resolved": "https://registry.npmjs.org/@remix-run/router/-/router-1.19.1.tgz", - "integrity": "sha512-S45oynt/WH19bHbIXjtli6QmwNYvaz+vtnubvNpNDvUOoA/OWh6j1OikIP3G+v5GHdxyC6EXoChG3HgYGEUfcg==", - "license": "MIT", + "version": "1.19.2", + "resolved": "https://registry.npmjs.org/@remix-run/router/-/router-1.19.2.tgz", + "integrity": "sha512-baiMx18+IMuD1yyvOGaHM9QrVUPGGG0jC+z+IPHnRJWUAUvaKuWKyE8gjDj2rzv3sz9zOGoRSPgeBVHRhZnBlA==", "engines": { 
"node": ">=14.0.0" } @@ -8113,12 +8112,11 @@ } }, "node_modules/react-router": { - "version": "6.26.1", - "resolved": "https://registry.npmjs.org/react-router/-/react-router-6.26.1.tgz", - "integrity": "sha512-kIwJveZNwp7teQRI5QmwWo39A5bXRyqpH0COKKmPnyD2vBvDwgFXSqDUYtt1h+FEyfnE8eXr7oe0MxRzVwCcvQ==", - "license": "MIT", + "version": "6.26.2", + "resolved": "https://registry.npmjs.org/react-router/-/react-router-6.26.2.tgz", + "integrity": "sha512-tvN1iuT03kHgOFnLPfLJ8V95eijteveqdOSk+srqfePtQvqCExB8eHOYnlilbOcyJyKnYkr1vJvf7YqotAJu1A==", "dependencies": { - "@remix-run/router": "1.19.1" + "@remix-run/router": "1.19.2" }, "engines": { "node": ">=14.0.0" @@ -8128,13 +8126,12 @@ } }, "node_modules/react-router-dom": { - "version": "6.26.1", - "resolved": "https://registry.npmjs.org/react-router-dom/-/react-router-dom-6.26.1.tgz", - "integrity": "sha512-veut7m41S1fLql4pLhxeSW3jlqs+4MtjRLj0xvuCEXsxusJCbs6I8yn9BxzzDX2XDgafrccY6hwjmd/bL54tFw==", - "license": "MIT", + "version": "6.26.2", + "resolved": "https://registry.npmjs.org/react-router-dom/-/react-router-dom-6.26.2.tgz", + "integrity": "sha512-z7YkaEW0Dy35T3/QKPYB1LjMK2R1fxnHO8kWpUMTBdfVzZrWOiY9a7CtN8HqdWtDUWd5FY6Dl8HFsqVwH4uOtQ==", "dependencies": { - "@remix-run/router": "1.19.1", - "react-router": "6.26.1" + "@remix-run/router": "1.19.2", + "react-router": "6.26.2" }, "engines": { "node": ">=14.0.0" From e05ab0c8f16407e08a1cfe60d7415dd4a5226534 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 1 Oct 2024 23:18:21 +0000 Subject: [PATCH 34/74] Bump github.com/linode/linodego from 1.40.0 to 1.41.0 Bumps [github.com/linode/linodego](https://github.com/linode/linodego) from 1.40.0 to 1.41.0. 
- [Release notes](https://github.com/linode/linodego/releases) - [Commits](https://github.com/linode/linodego/compare/v1.40.0...v1.41.0) --- updated-dependencies: - dependency-name: github.com/linode/linodego dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- go.mod | 8 ++++---- go.sum | 16 ++++++++-------- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/go.mod b/go.mod index c3f6bbe74..9decdbe5a 100644 --- a/go.mod +++ b/go.mod @@ -43,7 +43,7 @@ require ( github.com/json-iterator/go v1.1.12 github.com/klauspost/compress v1.17.9 github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b - github.com/linode/linodego v1.40.0 + github.com/linode/linodego v1.41.0 github.com/miekg/dns v1.1.62 github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f @@ -190,11 +190,11 @@ require ( go.opencensus.io v0.24.0 // indirect go.opentelemetry.io/otel/metric v1.29.0 // indirect go.opentelemetry.io/proto/otlp v1.3.1 // indirect - golang.org/x/crypto v0.26.0 // indirect + golang.org/x/crypto v0.27.0 // indirect golang.org/x/exp v0.0.0-20240119083558-1b970713d09a // indirect golang.org/x/mod v0.20.0 // indirect - golang.org/x/net v0.28.0 // indirect - golang.org/x/term v0.23.0 // indirect + golang.org/x/net v0.29.0 // indirect + golang.org/x/term v0.24.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect diff --git a/go.sum b/go.sum index 6f31bec93..57c5590af 100644 --- a/go.sum +++ b/go.sum @@ -472,8 +472,8 @@ github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0 github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod 
h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= -github.com/linode/linodego v1.40.0 h1:7ESY0PwK94hoggoCtIroT1Xk6b1flrFBNZ6KwqbTqlI= -github.com/linode/linodego v1.40.0/go.mod h1:NsUw4l8QrLdIofRg1NYFBbW5ZERnmbZykVBszPZLORM= +github.com/linode/linodego v1.41.0 h1:GcP7JIBr9iLRJ9FwAtb9/WCT1DuPJS/xUApapfdjtiY= +github.com/linode/linodego v1.41.0/go.mod h1:Ow4/XZ0yvWBzt3iAHwchvhSx30AyLintsSMvvQ2/SJY= github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= @@ -782,8 +782,8 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= -golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw= -golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54= +golang.org/x/crypto v0.27.0 h1:GXm2NjJrPaiv/h1tb2UH8QfgC/hOf/+z0p6PT8o1w7A= +golang.org/x/crypto v0.27.0/go.mod h1:1Xngt8kV6Dvbssa53Ziq6Eqn0HqbZi5Z6R0ZpwQzt70= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -865,8 +865,8 @@ golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net 
v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= -golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE= -golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= +golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo= +golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -963,8 +963,8 @@ golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= -golang.org/x/term v0.23.0 h1:F6D4vR+EHoL9/sWAWgAR1H2DcHr4PareCbAaCo1RpuU= -golang.org/x/term v0.23.0/go.mod h1:DgV24QBUrK6jhZXl+20l6UWznPlwAHm1Q1mGHtydmSk= +golang.org/x/term v0.24.0 h1:Mh5cbb+Zk2hqqXNO7S1iTjEphVL+jb8ZWaqh/g+JWkM= +golang.org/x/term v0.24.0/go.mod h1:lOBK/LVxemqiMij05LGJ0tzNr8xlmwBRJ81PX6wVLH8= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= From 8907583524630c13dfae2c787b5a3efdf63587ff Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 1 Oct 2024 23:18:22 +0000 Subject: [PATCH 35/74] Bump @codemirror/view from 6.33.0 to 6.34.1 in /web/ui 
Bumps [@codemirror/view](https://github.com/codemirror/view) from 6.33.0 to 6.34.1. - [Changelog](https://github.com/codemirror/view/blob/main/CHANGELOG.md) - [Commits](https://github.com/codemirror/view/compare/6.33.0...6.34.1) --- updated-dependencies: - dependency-name: "@codemirror/view" dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- web/ui/mantine-ui/package.json | 2 +- web/ui/module/codemirror-promql/package.json | 2 +- web/ui/package-lock.json | 11 +++++------ 3 files changed, 7 insertions(+), 8 deletions(-) diff --git a/web/ui/mantine-ui/package.json b/web/ui/mantine-ui/package.json index aae8ba99b..6b65bf9bb 100644 --- a/web/ui/mantine-ui/package.json +++ b/web/ui/mantine-ui/package.json @@ -16,7 +16,7 @@ "@codemirror/language": "^6.10.2", "@codemirror/lint": "^6.8.1", "@codemirror/state": "^6.4.1", - "@codemirror/view": "^6.33.0", + "@codemirror/view": "^6.34.1", "@floating-ui/dom": "^1.6.7", "@lezer/common": "^1.2.1", "@lezer/highlight": "^1.2.1", diff --git a/web/ui/module/codemirror-promql/package.json b/web/ui/module/codemirror-promql/package.json index 973cfca9e..cf05c6f64 100644 --- a/web/ui/module/codemirror-promql/package.json +++ b/web/ui/module/codemirror-promql/package.json @@ -37,7 +37,7 @@ "@codemirror/language": "^6.10.2", "@codemirror/lint": "^6.8.1", "@codemirror/state": "^6.3.3", - "@codemirror/view": "^6.29.1", + "@codemirror/view": "^6.34.1", "@lezer/common": "^1.2.1", "@lezer/highlight": "^1.2.0", "@lezer/lr": "^1.4.2", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index 49a907480..5de754b32 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -30,7 +30,7 @@ "@codemirror/language": "^6.10.2", "@codemirror/lint": "^6.8.1", "@codemirror/state": "^6.4.1", - "@codemirror/view": "^6.33.0", + "@codemirror/view": "^6.34.1", "@floating-ui/dom": "^1.6.7", "@lezer/common": "^1.2.1", "@lezer/highlight": "^1.2.1", @@ -171,7 +171,7 @@ 
"@codemirror/language": "^6.10.2", "@codemirror/lint": "^6.8.1", "@codemirror/state": "^6.3.3", - "@codemirror/view": "^6.29.1", + "@codemirror/view": "^6.34.1", "@lezer/common": "^1.2.1", "@lezer/highlight": "^1.2.0", "@lezer/lr": "^1.4.2", @@ -997,10 +997,9 @@ } }, "node_modules/@codemirror/view": { - "version": "6.33.0", - "resolved": "https://registry.npmjs.org/@codemirror/view/-/view-6.33.0.tgz", - "integrity": "sha512-AroaR3BvnjRW8fiZBalAaK+ZzB5usGgI014YKElYZvQdNH5ZIidHlO+cyf/2rWzyBFRkvG6VhiXeAEbC53P2YQ==", - "license": "MIT", + "version": "6.34.1", + "resolved": "https://registry.npmjs.org/@codemirror/view/-/view-6.34.1.tgz", + "integrity": "sha512-t1zK/l9UiRqwUNPm+pdIT0qzJlzuVckbTEMVNFhfWkGiBQClstzg+78vedCvLSX0xJEZ6lwZbPpnljL7L6iwMQ==", "dependencies": { "@codemirror/state": "^6.4.0", "style-mod": "^4.1.0", From cbb4ed0c3bb6536c18231a44ea938e975f562d85 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 1 Oct 2024 23:18:37 +0000 Subject: [PATCH 36/74] Bump @mantine/dates from 7.12.2 to 7.13.1 in /web/ui Bumps [@mantine/dates](https://github.com/mantinedev/mantine/tree/HEAD/packages/@mantine/dates) from 7.12.2 to 7.13.1. - [Release notes](https://github.com/mantinedev/mantine/releases) - [Changelog](https://github.com/mantinedev/mantine/blob/master/CHANGELOG.md) - [Commits](https://github.com/mantinedev/mantine/commits/7.13.1/packages/@mantine/dates) --- updated-dependencies: - dependency-name: "@mantine/dates" dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- web/ui/mantine-ui/package.json | 2 +- web/ui/package-lock.json | 29 +++++++++++++---------------- 2 files changed, 14 insertions(+), 17 deletions(-) diff --git a/web/ui/mantine-ui/package.json b/web/ui/mantine-ui/package.json index aae8ba99b..900793e3e 100644 --- a/web/ui/mantine-ui/package.json +++ b/web/ui/mantine-ui/package.json @@ -22,7 +22,7 @@ "@lezer/highlight": "^1.2.1", "@mantine/code-highlight": "^7.11.2", "@mantine/core": "^7.11.2", - "@mantine/dates": "^7.11.2", + "@mantine/dates": "^7.13.1", "@mantine/hooks": "^7.11.2", "@mantine/notifications": "^7.11.2", "@microsoft/fetch-event-source": "^2.0.1", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index 49a907480..f64d86c5a 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -36,7 +36,7 @@ "@lezer/highlight": "^1.2.1", "@mantine/code-highlight": "^7.11.2", "@mantine/core": "^7.11.2", - "@mantine/dates": "^7.11.2", + "@mantine/dates": "^7.13.1", "@mantine/hooks": "^7.11.2", "@mantine/notifications": "^7.11.2", "@microsoft/fetch-event-source": "^2.0.1", @@ -2188,10 +2188,9 @@ } }, "node_modules/@mantine/core": { - "version": "7.12.2", - "resolved": "https://registry.npmjs.org/@mantine/core/-/core-7.12.2.tgz", - "integrity": "sha512-FrMHOKq4s3CiPIxqZ9xnVX7H4PEGNmbtHMvWO/0YlfPgoV0Er/N/DNJOFW1ys4WSnidPTayYeB41riyxxGOpRQ==", - "license": "MIT", + "version": "7.13.1", + "resolved": "https://registry.npmjs.org/@mantine/core/-/core-7.13.1.tgz", + "integrity": "sha512-KH/WrcY/5pf3FxUUbtG77xyd7kfp6SRPAJFkxjFlg9kXroiQ7baljphY371CwPYPINERShUdvCQLpz4r4WMIHA==", "dependencies": { "@floating-ui/react": "^0.26.9", "clsx": "^2.1.1", @@ -2201,32 +2200,30 @@ "type-fest": "^4.12.0" }, "peerDependencies": { - "@mantine/hooks": "7.12.2", + "@mantine/hooks": "7.13.1", "react": "^18.2.0", "react-dom": "^18.2.0" } }, "node_modules/@mantine/dates": { - "version": "7.12.2", - "resolved": "https://registry.npmjs.org/@mantine/dates/-/dates-7.12.2.tgz", - 
"integrity": "sha512-qsDDl9qF80QLG1n6JiysyELAhbNLbV3qmXRAIU3GJLLxtZfyD9ntOUg0B64EpNl3Py4btXNo4yniFdu1JSUgwg==", - "license": "MIT", + "version": "7.13.1", + "resolved": "https://registry.npmjs.org/@mantine/dates/-/dates-7.13.1.tgz", + "integrity": "sha512-KzzAehnftPAiGhJhOaRcWBuQ5+f5HrqnpNjH2/0KN+dv3gUfitAbapXOmCYOTdzS9Zk+RqqsD5VKvsbr1giXtQ==", "dependencies": { "clsx": "^2.1.1" }, "peerDependencies": { - "@mantine/core": "7.12.2", - "@mantine/hooks": "7.12.2", + "@mantine/core": "7.13.1", + "@mantine/hooks": "7.13.1", "dayjs": ">=1.0.0", "react": "^18.2.0", "react-dom": "^18.2.0" } }, "node_modules/@mantine/hooks": { - "version": "7.12.2", - "resolved": "https://registry.npmjs.org/@mantine/hooks/-/hooks-7.12.2.tgz", - "integrity": "sha512-dVMw8jpM0hAzc8e7/GNvzkk9N0RN/m+PKycETB3H6lJGuXJJSRR4wzzgQKpEhHwPccktDpvb4rkukKDq2jA8Fg==", - "license": "MIT", + "version": "7.13.1", + "resolved": "https://registry.npmjs.org/@mantine/hooks/-/hooks-7.13.1.tgz", + "integrity": "sha512-Hfd4v380pPJUKDbARk+egdAanx7bpGZmaOn8G3QBZjwDufVopxip0WPkifUKUIMeYY1CTboa+1go9l56ZWrrSg==", "peerDependencies": { "react": "^18.2.0" } From 8f64d9144bbba0f83e9654a8b51ff9c0f1dcdd02 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 1 Oct 2024 23:19:00 +0000 Subject: [PATCH 37/74] Bump eslint-plugin-react-refresh from 0.4.11 to 0.4.12 in /web/ui Bumps [eslint-plugin-react-refresh](https://github.com/ArnaudBarre/eslint-plugin-react-refresh) from 0.4.11 to 0.4.12. - [Release notes](https://github.com/ArnaudBarre/eslint-plugin-react-refresh/releases) - [Changelog](https://github.com/ArnaudBarre/eslint-plugin-react-refresh/blob/main/CHANGELOG.md) - [Commits](https://github.com/ArnaudBarre/eslint-plugin-react-refresh/compare/v0.4.11...v0.4.12) --- updated-dependencies: - dependency-name: eslint-plugin-react-refresh dependency-type: direct:development update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- web/ui/mantine-ui/package.json | 2 +- web/ui/package-lock.json | 9 ++++----- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/web/ui/mantine-ui/package.json b/web/ui/mantine-ui/package.json index aae8ba99b..80bdac792 100644 --- a/web/ui/mantine-ui/package.json +++ b/web/ui/mantine-ui/package.json @@ -61,7 +61,7 @@ "@vitejs/plugin-react": "^4.2.1", "eslint": "^9.9.1", "eslint-plugin-react-hooks": "^5.1.0-rc-e56f4ae3-20240830", - "eslint-plugin-react-refresh": "^0.4.11", + "eslint-plugin-react-refresh": "^0.4.12", "globals": "^15.9.0", "jsdom": "^25.0.0", "postcss": "^8.4.35", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index 49a907480..6bd7a365f 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -75,7 +75,7 @@ "@vitejs/plugin-react": "^4.2.1", "eslint": "^9.9.1", "eslint-plugin-react-hooks": "^5.1.0-rc-e56f4ae3-20240830", - "eslint-plugin-react-refresh": "^0.4.11", + "eslint-plugin-react-refresh": "^0.4.12", "globals": "^15.9.0", "jsdom": "^25.0.0", "postcss": "^8.4.35", @@ -4696,11 +4696,10 @@ } }, "node_modules/eslint-plugin-react-refresh": { - "version": "0.4.11", - "resolved": "https://registry.npmjs.org/eslint-plugin-react-refresh/-/eslint-plugin-react-refresh-0.4.11.tgz", - "integrity": "sha512-wrAKxMbVr8qhXTtIKfXqAn5SAtRZt0aXxe5P23Fh4pUAdC6XEsybGLB8P0PI4j1yYqOgUEUlzKAGDfo7rJOjcw==", + "version": "0.4.12", + "resolved": "https://registry.npmjs.org/eslint-plugin-react-refresh/-/eslint-plugin-react-refresh-0.4.12.tgz", + "integrity": "sha512-9neVjoGv20FwYtCP6CB1dzR1vr57ZDNOXst21wd2xJ/cTlM2xLq0GWVlSNTdMn/4BtP6cHYBMCSp1wFBJ9jBsg==", "dev": true, - "license": "MIT", "peerDependencies": { "eslint": ">=7" } From 50ccf5cfd712397b1058867111ddade3c35a16b4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 1 Oct 2024 23:19:11 +0000 Subject: [PATCH 38/74] Bump @types/lodash from 4.17.7 to 4.17.9 in /web/ui Bumps 
[@types/lodash](https://github.com/DefinitelyTyped/DefinitelyTyped/tree/HEAD/types/lodash) from 4.17.7 to 4.17.9. - [Release notes](https://github.com/DefinitelyTyped/DefinitelyTyped/releases) - [Commits](https://github.com/DefinitelyTyped/DefinitelyTyped/commits/HEAD/types/lodash) --- updated-dependencies: - dependency-name: "@types/lodash" dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- web/ui/mantine-ui/package.json | 2 +- web/ui/package-lock.json | 9 ++++----- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/web/ui/mantine-ui/package.json b/web/ui/mantine-ui/package.json index aae8ba99b..9be7b951a 100644 --- a/web/ui/mantine-ui/package.json +++ b/web/ui/mantine-ui/package.json @@ -34,7 +34,7 @@ "@tanstack/react-query": "^5.22.2", "@testing-library/jest-dom": "^6.5.0", "@testing-library/react": "^16.0.1", - "@types/lodash": "^4.17.7", + "@types/lodash": "^4.17.9", "@types/sanitize-html": "^2.13.0", "@uiw/react-codemirror": "^4.23.1", "clsx": "^2.1.1", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index 49a907480..e8b6a76b4 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -48,7 +48,7 @@ "@tanstack/react-query": "^5.22.2", "@testing-library/jest-dom": "^6.5.0", "@testing-library/react": "^16.0.1", - "@types/lodash": "^4.17.7", + "@types/lodash": "^4.17.9", "@types/sanitize-html": "^2.13.0", "@uiw/react-codemirror": "^4.23.1", "clsx": "^2.1.1", @@ -2971,10 +2971,9 @@ "license": "MIT" }, "node_modules/@types/lodash": { - "version": "4.17.7", - "resolved": "https://registry.npmjs.org/@types/lodash/-/lodash-4.17.7.tgz", - "integrity": "sha512-8wTvZawATi/lsmNu10/j2hk1KEP0IvjubqPE3cu1Xz7xfXXt5oCq3SNUz4fMIP4XGF9Ky+Ue2tBA3hcS7LSBlA==", - "license": "MIT" + "version": "4.17.9", + "resolved": "https://registry.npmjs.org/@types/lodash/-/lodash-4.17.9.tgz", + "integrity": 
"sha512-w9iWudx1XWOHW5lQRS9iKpK/XuRhnN+0T7HvdCCd802FYkT1AMTnxndJHGrNJwRoRHkslGr4S29tjm1cT7x/7w==" }, "node_modules/@types/node": { "version": "22.5.4", From 8d9850b7ffa42d5214736380b135c9d852dff451 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 1 Oct 2024 23:21:30 +0000 Subject: [PATCH 39/74] Bump @eslint/js from 9.9.1 to 9.11.1 in /web/ui Bumps [@eslint/js](https://github.com/eslint/eslint/tree/HEAD/packages/js) from 9.9.1 to 9.11.1. - [Release notes](https://github.com/eslint/eslint/releases) - [Changelog](https://github.com/eslint/eslint/blob/main/CHANGELOG.md) - [Commits](https://github.com/eslint/eslint/commits/v9.11.1/packages/js) --- updated-dependencies: - dependency-name: "@eslint/js" dependency-type: direct:development update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- web/ui/mantine-ui/package.json | 2 +- web/ui/package-lock.json | 18 +++++++++++++----- 2 files changed, 14 insertions(+), 6 deletions(-) diff --git a/web/ui/mantine-ui/package.json b/web/ui/mantine-ui/package.json index aae8ba99b..c1083f646 100644 --- a/web/ui/mantine-ui/package.json +++ b/web/ui/mantine-ui/package.json @@ -53,7 +53,7 @@ "devDependencies": { "@eslint/compat": "^1.1.1", "@eslint/eslintrc": "^3.1.0", - "@eslint/js": "^9.9.1", + "@eslint/js": "^9.11.1", "@types/react": "^18.3.5", "@types/react-dom": "^18.3.0", "@typescript-eslint/eslint-plugin": "^6.21.0", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index 49a907480..9411dc851 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -67,7 +67,7 @@ "devDependencies": { "@eslint/compat": "^1.1.1", "@eslint/eslintrc": "^3.1.0", - "@eslint/js": "^9.9.1", + "@eslint/js": "^9.11.1", "@types/react": "^18.3.5", "@types/react-dom": "^18.3.0", "@typescript-eslint/eslint-plugin": "^6.21.0", @@ -145,6 +145,15 @@ } } }, + "mantine-ui/node_modules/eslint/node_modules/@eslint/js": { + "version": "9.9.1", + 
"resolved": "https://registry.npmjs.org/@eslint/js/-/js-9.9.1.tgz", + "integrity": "sha512-xIDQRsfg5hNBqHz04H1R3scSVwmI+KUbqjsQKHKQ1DAUSaUjYPReZZmS/5PNiKu1fUvzDd6H7DEDKACSEhu+TQ==", + "dev": true, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + } + }, "mantine-ui/node_modules/globals": { "version": "15.9.0", "resolved": "https://registry.npmjs.org/globals/-/globals-15.9.0.tgz", @@ -1487,11 +1496,10 @@ } }, "node_modules/@eslint/js": { - "version": "9.9.1", - "resolved": "https://registry.npmjs.org/@eslint/js/-/js-9.9.1.tgz", - "integrity": "sha512-xIDQRsfg5hNBqHz04H1R3scSVwmI+KUbqjsQKHKQ1DAUSaUjYPReZZmS/5PNiKu1fUvzDd6H7DEDKACSEhu+TQ==", + "version": "9.11.1", + "resolved": "https://registry.npmjs.org/@eslint/js/-/js-9.11.1.tgz", + "integrity": "sha512-/qu+TWz8WwPWc7/HcIJKi+c+MOm46GdVaSlTTQcaqaL53+GsoA6MxWp5PtTx48qbSP7ylM1Kn7nhvkugfJvRSA==", "dev": true, - "license": "MIT", "engines": { "node": "^18.18.0 || ^20.9.0 || >=21.1.0" } From 83efefd35db6b7779ee32fbd39d3ed33d5b1d940 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 1 Oct 2024 23:21:44 +0000 Subject: [PATCH 40/74] Bump @types/jest from 29.5.12 to 29.5.13 in /web/ui Bumps [@types/jest](https://github.com/DefinitelyTyped/DefinitelyTyped/tree/HEAD/types/jest) from 29.5.12 to 29.5.13. - [Release notes](https://github.com/DefinitelyTyped/DefinitelyTyped/releases) - [Commits](https://github.com/DefinitelyTyped/DefinitelyTyped/commits/HEAD/types/jest) --- updated-dependencies: - dependency-name: "@types/jest" dependency-type: direct:development update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- web/ui/package-lock.json | 9 ++++----- web/ui/package.json | 2 +- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index 49a907480..75efd90ec 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -12,7 +12,7 @@ "module/*" ], "devDependencies": { - "@types/jest": "^29.5.12", + "@types/jest": "^29.5.13", "@typescript-eslint/eslint-plugin": "^6.21.0", "@typescript-eslint/parser": "^6.21.0", "eslint-config-prettier": "^9.1.0", @@ -2918,11 +2918,10 @@ } }, "node_modules/@types/jest": { - "version": "29.5.12", - "resolved": "https://registry.npmjs.org/@types/jest/-/jest-29.5.12.tgz", - "integrity": "sha512-eDC8bTvT/QhYdxJAulQikueigY5AsdBRH2yDKW3yveW7svY3+DzN84/2NUgkw10RTiJbWqZrTtoGVdYlvFJdLw==", + "version": "29.5.13", + "resolved": "https://registry.npmjs.org/@types/jest/-/jest-29.5.13.tgz", + "integrity": "sha512-wd+MVEZCHt23V0/L642O5APvspWply/rGY5BcW4SUETo2UzPU3Z26qr8jC2qxpimI2jjx9h7+2cj2FwIr01bXg==", "dev": true, - "license": "MIT", "dependencies": { "expect": "^29.0.0", "pretty-format": "^29.0.0" diff --git a/web/ui/package.json b/web/ui/package.json index 639ef70ab..6aa39d1a7 100644 --- a/web/ui/package.json +++ b/web/ui/package.json @@ -15,7 +15,7 @@ "module/*" ], "devDependencies": { - "@types/jest": "^29.5.12", + "@types/jest": "^29.5.13", "@typescript-eslint/eslint-plugin": "^6.21.0", "@typescript-eslint/parser": "^6.21.0", "eslint-config-prettier": "^9.1.0", From 0a61cc0363cd84009bb519340fd42071746d8ecd Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 1 Oct 2024 23:46:03 +0000 Subject: [PATCH 41/74] Bump actions/setup-node from 4.0.3 to 4.0.4 Bumps [actions/setup-node](https://github.com/actions/setup-node) from 4.0.3 to 4.0.4. 
- [Release notes](https://github.com/actions/setup-node/releases) - [Commits](https://github.com/actions/setup-node/compare/1e60f620b9541d16bece96c5465dc8ee9832be0b...0a44ba7841725637a19e28fa30b79a866c81b0a6) --- updated-dependencies: - dependency-name: actions/setup-node dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 9e614cb2d..2ef0e97a1 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -243,7 +243,7 @@ jobs: uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 - uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4 - name: Install nodejs - uses: actions/setup-node@1e60f620b9541d16bece96c5465dc8ee9832be0b # v4.0.3 + uses: actions/setup-node@0a44ba7841725637a19e28fa30b79a866c81b0a6 # v4.0.4 with: node-version-file: "web/ui/.nvmrc" registry-url: "https://registry.npmjs.org" From bc82eacd2e5a274bae18d6e5a4bdf1bfba535d6f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 1 Oct 2024 23:46:13 +0000 Subject: [PATCH 42/74] Bump actions/checkout from 4.1.7 to 4.2.0 in /scripts Bumps [actions/checkout](https://github.com/actions/checkout) from 4.1.7 to 4.2.0. - [Release notes](https://github.com/actions/checkout/releases) - [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md) - [Commits](https://github.com/actions/checkout/compare/692973e3d937129bcbf40652eb9f2f61becf3332...d632683dd7b4114ad314bca15554477dd762a938) --- updated-dependencies: - dependency-name: actions/checkout dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- scripts/golangci-lint.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/golangci-lint.yml b/scripts/golangci-lint.yml index a15cfc97f..1c099932b 100644 --- a/scripts/golangci-lint.yml +++ b/scripts/golangci-lint.yml @@ -24,7 +24,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout repository - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 - name: Install Go uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 with: From 886d705653f79909d1bfde24bbec3b09ff87b619 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 1 Oct 2024 23:46:14 +0000 Subject: [PATCH 43/74] Bump github/codeql-action from 3.26.6 to 3.26.10 Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.26.6 to 3.26.10. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/4dd16135b69a43b6c8efb853346f8437d92d3c93...e2b3eafc8d227b0241d48be5f425d47c2d750a13) --- updated-dependencies: - dependency-name: github/codeql-action dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- .github/workflows/codeql-analysis.yml | 6 +++--- .github/workflows/scorecards.yml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 89aa2ba29..1466f4ec2 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -27,12 +27,12 @@ jobs: uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 - name: Initialize CodeQL - uses: github/codeql-action/init@4dd16135b69a43b6c8efb853346f8437d92d3c93 # v3.26.6 + uses: github/codeql-action/init@e2b3eafc8d227b0241d48be5f425d47c2d750a13 # v3.26.10 with: languages: ${{ matrix.language }} - name: Autobuild - uses: github/codeql-action/autobuild@4dd16135b69a43b6c8efb853346f8437d92d3c93 # v3.26.6 + uses: github/codeql-action/autobuild@e2b3eafc8d227b0241d48be5f425d47c2d750a13 # v3.26.10 - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@4dd16135b69a43b6c8efb853346f8437d92d3c93 # v3.26.6 + uses: github/codeql-action/analyze@e2b3eafc8d227b0241d48be5f425d47c2d750a13 # v3.26.10 diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index 7e0ed7dc3..b5fbc7c94 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -45,6 +45,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. 
- name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@4dd16135b69a43b6c8efb853346f8437d92d3c93 # tag=v3.26.6 + uses: github/codeql-action/upload-sarif@e2b3eafc8d227b0241d48be5f425d47c2d750a13 # tag=v3.26.10 with: sarif_file: results.sarif From 2a69565cef4a229923f6f270e2e137158e802ae2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 1 Oct 2024 23:46:19 +0000 Subject: [PATCH 44/74] Bump bufbuild/buf-setup-action from 1.39.0 to 1.43.0 Bumps [bufbuild/buf-setup-action](https://github.com/bufbuild/buf-setup-action) from 1.39.0 to 1.43.0. - [Release notes](https://github.com/bufbuild/buf-setup-action/releases) - [Commits](https://github.com/bufbuild/buf-setup-action/compare/54abbed4fe8d8d45173eca4798b0c39a53a7b658...62ee92603c244ad0da98bab36a834a999a5329e6) --- updated-dependencies: - dependency-name: bufbuild/buf-setup-action dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- .github/workflows/buf-lint.yml | 2 +- .github/workflows/buf.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/buf-lint.yml b/.github/workflows/buf-lint.yml index 3f6cf76e1..8f932b759 100644 --- a/.github/workflows/buf-lint.yml +++ b/.github/workflows/buf-lint.yml @@ -13,7 +13,7 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 - - uses: bufbuild/buf-setup-action@54abbed4fe8d8d45173eca4798b0c39a53a7b658 # v1.39.0 + - uses: bufbuild/buf-setup-action@62ee92603c244ad0da98bab36a834a999a5329e6 # v1.43.0 with: github_token: ${{ secrets.GITHUB_TOKEN }} - uses: bufbuild/buf-lint-action@06f9dd823d873146471cfaaf108a993fe00e5325 # v1.1.1 diff --git a/.github/workflows/buf.yml b/.github/workflows/buf.yml index 632d38cb0..1b189926f 100644 --- a/.github/workflows/buf.yml +++ b/.github/workflows/buf.yml @@ -13,7 +13,7 @@ jobs: if: github.repository_owner == 
'prometheus' steps: - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 - - uses: bufbuild/buf-setup-action@54abbed4fe8d8d45173eca4798b0c39a53a7b658 # v1.39.0 + - uses: bufbuild/buf-setup-action@62ee92603c244ad0da98bab36a834a999a5329e6 # v1.43.0 with: github_token: ${{ secrets.GITHUB_TOKEN }} - uses: bufbuild/buf-lint-action@06f9dd823d873146471cfaaf108a993fe00e5325 # v1.1.1 From 6d4de289188d844bd2ffb4e106c3c3a19e2ce708 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 2 Oct 2024 04:03:42 +0000 Subject: [PATCH 45/74] Bump @mantine/notifications from 7.12.2 to 7.13.1 in /web/ui Bumps [@mantine/notifications](https://github.com/mantinedev/mantine/tree/HEAD/packages/@mantine/notifications) from 7.12.2 to 7.13.1. - [Release notes](https://github.com/mantinedev/mantine/releases) - [Changelog](https://github.com/mantinedev/mantine/blob/master/CHANGELOG.md) - [Commits](https://github.com/mantinedev/mantine/commits/7.13.1/packages/@mantine/notifications) --- updated-dependencies: - dependency-name: "@mantine/notifications" dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- web/ui/mantine-ui/package.json | 2 +- web/ui/package-lock.json | 22 ++++++++++------------ 2 files changed, 11 insertions(+), 13 deletions(-) diff --git a/web/ui/mantine-ui/package.json b/web/ui/mantine-ui/package.json index 374e57174..92795d17a 100644 --- a/web/ui/mantine-ui/package.json +++ b/web/ui/mantine-ui/package.json @@ -24,7 +24,7 @@ "@mantine/core": "^7.11.2", "@mantine/dates": "^7.13.1", "@mantine/hooks": "^7.11.2", - "@mantine/notifications": "^7.11.2", + "@mantine/notifications": "^7.13.1", "@microsoft/fetch-event-source": "^2.0.1", "@nexucis/fuzzy": "^0.5.1", "@nexucis/kvsearch": "^0.9.1", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index fab7181df..41f567fe8 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -38,7 +38,7 @@ "@mantine/core": "^7.11.2", "@mantine/dates": "^7.13.1", "@mantine/hooks": "^7.11.2", - "@mantine/notifications": "^7.11.2", + "@mantine/notifications": "^7.13.1", "@microsoft/fetch-event-source": "^2.0.1", "@nexucis/fuzzy": "^0.5.1", "@nexucis/kvsearch": "^0.9.1", @@ -2229,26 +2229,24 @@ } }, "node_modules/@mantine/notifications": { - "version": "7.12.2", - "resolved": "https://registry.npmjs.org/@mantine/notifications/-/notifications-7.12.2.tgz", - "integrity": "sha512-gTvLHkoAZ42v5bZxibP9A50djp5ndEwumVhHSa7mxQ8oSS23tt3It/6hOqH7M+9kHY0a8s+viMiflUzTByA9qg==", - "license": "MIT", + "version": "7.13.1", + "resolved": "https://registry.npmjs.org/@mantine/notifications/-/notifications-7.13.1.tgz", + "integrity": "sha512-Lc66wRar/nqADoaSlLHntREWbMlDDVs/Sabla2ac/V8jftLOnQpVPMefMpFVGYNJdhT3mG/9bguZV5K7pkjSXQ==", "dependencies": { - "@mantine/store": "7.12.2", + "@mantine/store": "7.13.1", "react-transition-group": "4.4.5" }, "peerDependencies": { - "@mantine/core": "7.12.2", - "@mantine/hooks": "7.12.2", + "@mantine/core": "7.13.1", + "@mantine/hooks": "7.13.1", "react": "^18.2.0", "react-dom": "^18.2.0" } }, "node_modules/@mantine/store": { - "version": 
"7.12.2", - "resolved": "https://registry.npmjs.org/@mantine/store/-/store-7.12.2.tgz", - "integrity": "sha512-NqL31sO/KcAETEWP/CiXrQOQNoE4168vZsxyXacQHGBueVMJa64WIDQtKLHrCnFRMws3vsXF02/OO4bH4XGcMQ==", - "license": "MIT", + "version": "7.13.1", + "resolved": "https://registry.npmjs.org/@mantine/store/-/store-7.13.1.tgz", + "integrity": "sha512-/ZiVU8oFMkzSNrXqAvxb9ZfHWgVg7E8apUEQCzBh9sxgxdVoM9Y1+2YqOoi885hxskmPpkmGP+VGOJnQ6OKJig==", "peerDependencies": { "react": "^18.2.0" } From bcaf1084e63165d1fb969a3fb5e73edb7178afe8 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 2 Oct 2024 04:03:48 +0000 Subject: [PATCH 46/74] Bump @mantine/code-highlight from 7.12.2 to 7.13.1 in /web/ui Bumps [@mantine/code-highlight](https://github.com/mantinedev/mantine/tree/HEAD/packages/@mantine/code-highlight) from 7.12.2 to 7.13.1. - [Release notes](https://github.com/mantinedev/mantine/releases) - [Changelog](https://github.com/mantinedev/mantine/blob/master/CHANGELOG.md) - [Commits](https://github.com/mantinedev/mantine/commits/7.13.1/packages/@mantine/code-highlight) --- updated-dependencies: - dependency-name: "@mantine/code-highlight" dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- web/ui/mantine-ui/package.json | 2 +- web/ui/package-lock.json | 13 ++++++------- 2 files changed, 7 insertions(+), 8 deletions(-) diff --git a/web/ui/mantine-ui/package.json b/web/ui/mantine-ui/package.json index 374e57174..77fc52c20 100644 --- a/web/ui/mantine-ui/package.json +++ b/web/ui/mantine-ui/package.json @@ -20,7 +20,7 @@ "@floating-ui/dom": "^1.6.7", "@lezer/common": "^1.2.1", "@lezer/highlight": "^1.2.1", - "@mantine/code-highlight": "^7.11.2", + "@mantine/code-highlight": "^7.13.1", "@mantine/core": "^7.11.2", "@mantine/dates": "^7.13.1", "@mantine/hooks": "^7.11.2", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index fab7181df..9c387bad7 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -34,7 +34,7 @@ "@floating-ui/dom": "^1.6.7", "@lezer/common": "^1.2.1", "@lezer/highlight": "^1.2.1", - "@mantine/code-highlight": "^7.11.2", + "@mantine/code-highlight": "^7.13.1", "@mantine/core": "^7.11.2", "@mantine/dates": "^7.13.1", "@mantine/hooks": "^7.11.2", @@ -2172,17 +2172,16 @@ } }, "node_modules/@mantine/code-highlight": { - "version": "7.12.2", - "resolved": "https://registry.npmjs.org/@mantine/code-highlight/-/code-highlight-7.12.2.tgz", - "integrity": "sha512-eVVA6ZmtV2qV60qiQW3wvFbs0ryCmzrCJaqU4GV0D+6lGVn8mwbbo36+Jt4Qz/6FrswPD99ALRBlOwHDJe0P8A==", - "license": "MIT", + "version": "7.13.1", + "resolved": "https://registry.npmjs.org/@mantine/code-highlight/-/code-highlight-7.13.1.tgz", + "integrity": "sha512-7Iz6ymlTFf8hRu7OBUDOaevr2cnOPtktnDJ+9KtYibA7iZoaMxtv7CfarhfcYghDdPK9HOIQpAJkbzD5NgwjYQ==", "dependencies": { "clsx": "^2.1.1", "highlight.js": "^11.9.0" }, "peerDependencies": { - "@mantine/core": "7.12.2", - "@mantine/hooks": "7.12.2", + "@mantine/core": "7.13.1", + "@mantine/hooks": "7.13.1", "react": "^18.2.0", "react-dom": "^18.2.0" } From f24e2109ad4edf20824ab4f80f4f514436189ad2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" 
<49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 2 Oct 2024 04:09:11 +0000 Subject: [PATCH 47/74] Bump @uiw/react-codemirror from 4.23.1 to 4.23.3 in /web/ui Bumps [@uiw/react-codemirror](https://github.com/uiwjs/react-codemirror) from 4.23.1 to 4.23.3. - [Release notes](https://github.com/uiwjs/react-codemirror/releases) - [Commits](https://github.com/uiwjs/react-codemirror/compare/v4.23.1...v4.23.3) --- updated-dependencies: - dependency-name: "@uiw/react-codemirror" dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- web/ui/mantine-ui/package.json | 2 +- web/ui/package-lock.json | 18 ++++++++---------- 2 files changed, 9 insertions(+), 11 deletions(-) diff --git a/web/ui/mantine-ui/package.json b/web/ui/mantine-ui/package.json index 0e57908ec..8681a68d5 100644 --- a/web/ui/mantine-ui/package.json +++ b/web/ui/mantine-ui/package.json @@ -36,7 +36,7 @@ "@testing-library/react": "^16.0.1", "@types/lodash": "^4.17.9", "@types/sanitize-html": "^2.13.0", - "@uiw/react-codemirror": "^4.23.1", + "@uiw/react-codemirror": "^4.23.3", "clsx": "^2.1.1", "dayjs": "^1.11.10", "lodash": "^4.17.21", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index 5c9265a2a..bf024bb32 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -50,7 +50,7 @@ "@testing-library/react": "^16.0.1", "@types/lodash": "^4.17.9", "@types/sanitize-html": "^2.13.0", - "@uiw/react-codemirror": "^4.23.1", + "@uiw/react-codemirror": "^4.23.3", "clsx": "^2.1.1", "dayjs": "^1.11.10", "lodash": "^4.17.21", @@ -3299,10 +3299,9 @@ } }, "node_modules/@uiw/codemirror-extensions-basic-setup": { - "version": "4.23.1", - "resolved": "https://registry.npmjs.org/@uiw/codemirror-extensions-basic-setup/-/codemirror-extensions-basic-setup-4.23.1.tgz", - "integrity": "sha512-l/1iBZt3Ao9ElUvUvA0CI8bLcGw0kgV0976l1u3psYMfKYJl5TwSHn6JOeSt/iCq/13exp1f7u+zFMRwtzeinw==", - "license": "MIT", + "version": "4.23.3", + 
"resolved": "https://registry.npmjs.org/@uiw/codemirror-extensions-basic-setup/-/codemirror-extensions-basic-setup-4.23.3.tgz", + "integrity": "sha512-nEMjgbCyeLx+UQgOGAAoUWYFE34z5TlyaKNszuig/BddYFDb0WKcgmC37bDFxR2dZssf3K/lwGWLpXnGKXePbA==", "dependencies": { "@codemirror/autocomplete": "^6.0.0", "@codemirror/commands": "^6.0.0", @@ -3326,16 +3325,15 @@ } }, "node_modules/@uiw/react-codemirror": { - "version": "4.23.1", - "resolved": "https://registry.npmjs.org/@uiw/react-codemirror/-/react-codemirror-4.23.1.tgz", - "integrity": "sha512-OUrBY/7gvmiolgP4m9UlsGAzNce9YEzmDvPPAc+g27q+BZEJYeWQCzqtjtXfL7OkwQcZ0Aea2DuUUZRUTTIyxg==", - "license": "MIT", + "version": "4.23.3", + "resolved": "https://registry.npmjs.org/@uiw/react-codemirror/-/react-codemirror-4.23.3.tgz", + "integrity": "sha512-TBBLUbeqXmfQSfO+f3rPNOAb+QXbSm7KPB64FHQWLGg2WJNbpOhjLOWMyL+C4ZP3aSCNc2Y5aftEK1vp3wCKTA==", "dependencies": { "@babel/runtime": "^7.18.6", "@codemirror/commands": "^6.1.0", "@codemirror/state": "^6.1.1", "@codemirror/theme-one-dark": "^6.0.0", - "@uiw/codemirror-extensions-basic-setup": "4.23.1", + "@uiw/codemirror-extensions-basic-setup": "4.23.3", "codemirror": "^6.0.0" }, "funding": { From 1da185244e64b3c68b63f9e3cb412d9a87aa7233 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 2 Oct 2024 04:09:28 +0000 Subject: [PATCH 48/74] Bump @tanstack/react-query from 5.53.2 to 5.59.0 in /web/ui Bumps [@tanstack/react-query](https://github.com/TanStack/query/tree/HEAD/packages/react-query) from 5.53.2 to 5.59.0. - [Release notes](https://github.com/TanStack/query/releases) - [Commits](https://github.com/TanStack/query/commits/v5.59.0/packages/react-query) --- updated-dependencies: - dependency-name: "@tanstack/react-query" dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- web/ui/mantine-ui/package.json | 2 +- web/ui/package-lock.json | 18 ++++++++---------- 2 files changed, 9 insertions(+), 11 deletions(-) diff --git a/web/ui/mantine-ui/package.json b/web/ui/mantine-ui/package.json index 0e57908ec..00265c28a 100644 --- a/web/ui/mantine-ui/package.json +++ b/web/ui/mantine-ui/package.json @@ -31,7 +31,7 @@ "@prometheus-io/codemirror-promql": "0.300.0-beta.0", "@reduxjs/toolkit": "^2.2.1", "@tabler/icons-react": "^2.47.0", - "@tanstack/react-query": "^5.22.2", + "@tanstack/react-query": "^5.59.0", "@testing-library/jest-dom": "^6.5.0", "@testing-library/react": "^16.0.1", "@types/lodash": "^4.17.9", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index 5c9265a2a..5cc184069 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -45,7 +45,7 @@ "@prometheus-io/codemirror-promql": "0.300.0-beta.0", "@reduxjs/toolkit": "^2.2.1", "@tabler/icons-react": "^2.47.0", - "@tanstack/react-query": "^5.22.2", + "@tanstack/react-query": "^5.59.0", "@testing-library/jest-dom": "^6.5.0", "@testing-library/react": "^16.0.1", "@types/lodash": "^4.17.9", @@ -2705,22 +2705,20 @@ } }, "node_modules/@tanstack/query-core": { - "version": "5.53.2", - "resolved": "https://registry.npmjs.org/@tanstack/query-core/-/query-core-5.53.2.tgz", - "integrity": "sha512-gCsABpRrYfLsmwcQ0JCE5I3LOQ9KYrDDSnseUDP3T7ukV8E7+lhlHDJS4Gegt1TSZCsxKhc1J5A7TkF5ePjDUQ==", - "license": "MIT", + "version": "5.59.0", + "resolved": "https://registry.npmjs.org/@tanstack/query-core/-/query-core-5.59.0.tgz", + "integrity": "sha512-WGD8uIhX6/deH/tkZqPNcRyAhDUqs729bWKoByYHSogcshXfFbppOdTER5+qY7mFvu8KEFJwT0nxr8RfPTVh0Q==", "funding": { "type": "github", "url": "https://github.com/sponsors/tannerlinsley" } }, "node_modules/@tanstack/react-query": { - "version": "5.53.2", - "resolved": "https://registry.npmjs.org/@tanstack/react-query/-/react-query-5.53.2.tgz", - "integrity": 
"sha512-ZxG/rspElkfqg2LElnNtsNgPtiCZ4Wl2XY43bATQqPvNgyrhzbCFzCjDwSQy9fJhSiDVALSlxYS8YOIiToqQmg==", - "license": "MIT", + "version": "5.59.0", + "resolved": "https://registry.npmjs.org/@tanstack/react-query/-/react-query-5.59.0.tgz", + "integrity": "sha512-YDXp3OORbYR+8HNQx+lf4F73NoiCmCcSvZvgxE29OifmQFk0sBlO26NWLHpcNERo92tVk3w+JQ53/vkcRUY1hA==", "dependencies": { - "@tanstack/query-core": "5.53.2" + "@tanstack/query-core": "5.59.0" }, "funding": { "type": "github", From 0e2623910f1a7f091eec4dcf5b65bd2d9e65105b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 2 Oct 2024 04:13:09 +0000 Subject: [PATCH 49/74] Bump jsdom from 25.0.0 to 25.0.1 in /web/ui Bumps [jsdom](https://github.com/jsdom/jsdom) from 25.0.0 to 25.0.1. - [Release notes](https://github.com/jsdom/jsdom/releases) - [Changelog](https://github.com/jsdom/jsdom/blob/main/Changelog.md) - [Commits](https://github.com/jsdom/jsdom/compare/25.0.0...25.0.1) --- updated-dependencies: - dependency-name: jsdom dependency-type: direct:development update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- web/ui/mantine-ui/package.json | 2 +- web/ui/package-lock.json | 103 +++++++++++---------------------- 2 files changed, 34 insertions(+), 71 deletions(-) diff --git a/web/ui/mantine-ui/package.json b/web/ui/mantine-ui/package.json index e8244022b..7181d7bb3 100644 --- a/web/ui/mantine-ui/package.json +++ b/web/ui/mantine-ui/package.json @@ -63,7 +63,7 @@ "eslint-plugin-react-hooks": "^5.1.0-rc-e56f4ae3-20240830", "eslint-plugin-react-refresh": "^0.4.12", "globals": "^15.9.0", - "jsdom": "^25.0.0", + "jsdom": "^25.0.1", "postcss": "^8.4.35", "postcss-preset-mantine": "^1.17.0", "postcss-simple-vars": "^7.0.1", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index b580a3aa9..f2765bdb4 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -77,7 +77,7 @@ "eslint-plugin-react-hooks": "^5.1.0-rc-e56f4ae3-20240830", "eslint-plugin-react-refresh": "^0.4.12", "globals": "^15.9.0", - "jsdom": "^25.0.0", + "jsdom": "^25.0.1", "postcss": "^8.4.35", "postcss-preset-mantine": "^1.17.0", "postcss-simple-vars": "^7.0.1", @@ -4206,25 +4206,17 @@ } }, "node_modules/cssstyle": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/cssstyle/-/cssstyle-4.0.1.tgz", - "integrity": "sha512-8ZYiJ3A/3OkDd093CBT/0UKDWry7ak4BdPTFP2+QEP7cmhouyq/Up709ASSj2cK02BbZiMgk7kYjZNS4QP5qrQ==", + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/cssstyle/-/cssstyle-4.1.0.tgz", + "integrity": "sha512-h66W1URKpBS5YMI/V8PyXvTMFT8SupJ1IzoIV8IeBC/ji8WVmrO8dGlTi+2dh6whmdk6BiKJLD/ZBkhWbcg6nA==", "dev": true, - "license": "MIT", "dependencies": { - "rrweb-cssom": "^0.6.0" + "rrweb-cssom": "^0.7.1" }, "engines": { "node": ">=18" } }, - "node_modules/cssstyle/node_modules/rrweb-cssom": { - "version": "0.6.0", - "resolved": "https://registry.npmjs.org/rrweb-cssom/-/rrweb-cssom-0.6.0.tgz", - "integrity": "sha512-APM0Gt1KoXBz0iIkkdB/kfvGOwC4UuJFeG/c+yV7wSc7q96cG/kJ0HiYCnzivD9SB53cLV1MlHFNfOuPaadYSw==", - "dev": true, - 
"license": "MIT" - }, "node_modules/csstype": { "version": "3.1.3", "resolved": "https://registry.npmjs.org/csstype/-/csstype-3.1.3.tgz", @@ -6848,13 +6840,12 @@ } }, "node_modules/jsdom": { - "version": "25.0.0", - "resolved": "https://registry.npmjs.org/jsdom/-/jsdom-25.0.0.tgz", - "integrity": "sha512-OhoFVT59T7aEq75TVw9xxEfkXgacpqAhQaYgP9y/fDqWQCMB/b1H66RfmPm/MaeaAIU9nDwMOVTlPN51+ao6CQ==", + "version": "25.0.1", + "resolved": "https://registry.npmjs.org/jsdom/-/jsdom-25.0.1.tgz", + "integrity": "sha512-8i7LzZj7BF8uplX+ZyOlIz86V6TAsSs+np6m1kpW9u0JWi4z/1t+FzcK1aek+ybTnAC4KhBL4uXCNT0wcUIeCw==", "dev": true, - "license": "MIT", "dependencies": { - "cssstyle": "^4.0.1", + "cssstyle": "^4.1.0", "data-urls": "^5.0.0", "decimal.js": "^10.4.3", "form-data": "^4.0.0", @@ -6867,7 +6858,7 @@ "rrweb-cssom": "^0.7.1", "saxes": "^6.0.0", "symbol-tree": "^3.2.4", - "tough-cookie": "^4.1.4", + "tough-cookie": "^5.0.0", "w3c-xmlserializer": "^5.0.0", "webidl-conversions": "^7.0.0", "whatwg-encoding": "^3.1.1", @@ -7919,13 +7910,6 @@ "node": ">= 8" } }, - "node_modules/psl": { - "version": "1.9.0", - "resolved": "https://registry.npmjs.org/psl/-/psl-1.9.0.tgz", - "integrity": "sha512-E/ZsdU4HLs/68gYzgGTkMicWTLPdAftJLfJFlLUAAKZGkStNU72sZjT66SnMDVOfOWY/YAoiD7Jxa9iHvngcag==", - "dev": true, - "license": "MIT" - }, "node_modules/punycode": { "version": "2.3.1", "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.1.tgz", @@ -7954,13 +7938,6 @@ "license": "MIT", "peer": true }, - "node_modules/querystringify": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/querystringify/-/querystringify-2.2.0.tgz", - "integrity": "sha512-FIqgj2EUvTa7R50u0rGsyTftzjYmv/a3hO345bZNrqabNqjtgiDMgmo4mkUjd+nzU5oF3dClKqFIPUKybUyqoQ==", - "dev": true, - "license": "MIT" - }, "node_modules/queue-microtask": { "version": "1.2.3", "resolved": "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz", @@ -8247,13 +8224,6 @@ "node": ">=0.10.0" } }, - 
"node_modules/requires-port": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/requires-port/-/requires-port-1.0.0.tgz", - "integrity": "sha512-KigOCHcocU3XODJxsu8i/j8T9tzT4adHiecwORRQ0ZZFcp7ahwXuRU1m+yuO90C5ZUyGeGfocHDI14M3L3yDAQ==", - "dev": true, - "license": "MIT" - }, "node_modules/reselect": { "version": "5.1.1", "resolved": "https://registry.npmjs.org/reselect/-/reselect-5.1.1.tgz", @@ -8857,6 +8827,24 @@ "node": ">=14.0.0" } }, + "node_modules/tldts": { + "version": "6.1.48", + "resolved": "https://registry.npmjs.org/tldts/-/tldts-6.1.48.tgz", + "integrity": "sha512-SPbnh1zaSzi/OsmHb1vrPNnYuwJbdWjwo5TbBYYMlTtH3/1DSb41t8bcSxkwDmmbG2q6VLPVvQc7Yf23T+1EEw==", + "dev": true, + "dependencies": { + "tldts-core": "^6.1.48" + }, + "bin": { + "tldts": "bin/cli.js" + } + }, + "node_modules/tldts-core": { + "version": "6.1.48", + "resolved": "https://registry.npmjs.org/tldts-core/-/tldts-core-6.1.48.tgz", + "integrity": "sha512-3gD9iKn/n2UuFH1uilBviK9gvTNT6iYwdqrj1Vr5mh8FuelvpRNaYVH4pNYqUgOGU4aAdL9X35eLuuj0gRsx+A==", + "dev": true + }, "node_modules/tmpl": { "version": "1.0.5", "resolved": "https://registry.npmjs.org/tmpl/-/tmpl-1.0.5.tgz", @@ -8889,19 +8877,15 @@ } }, "node_modules/tough-cookie": { - "version": "4.1.4", - "resolved": "https://registry.npmjs.org/tough-cookie/-/tough-cookie-4.1.4.tgz", - "integrity": "sha512-Loo5UUvLD9ScZ6jh8beX1T6sO1w2/MpCRpEP7V280GKMVUQ0Jzar2U3UJPsrdbziLEMMhu3Ujnq//rhiFuIeag==", + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/tough-cookie/-/tough-cookie-5.0.0.tgz", + "integrity": "sha512-FRKsF7cz96xIIeMZ82ehjC3xW2E+O2+v11udrDYewUbszngYhsGa8z6YUMMzO9QJZzzyd0nGGXnML/TReX6W8Q==", "dev": true, - "license": "BSD-3-Clause", "dependencies": { - "psl": "^1.1.33", - "punycode": "^2.1.1", - "universalify": "^0.2.0", - "url-parse": "^1.5.3" + "tldts": "^6.1.32" }, "engines": { - "node": ">=6" + "node": ">=16" } }, "node_modules/tr46": { @@ -9041,16 +9025,6 @@ "dev": true, "license": "MIT" }, - 
"node_modules/universalify": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/universalify/-/universalify-0.2.0.tgz", - "integrity": "sha512-CJ1QgKmNg3CwvAv/kOFmtnEN05f0D/cn9QntgNOQlQF9dgvVTHj3t+8JPdjqawCHk7V/KA+fbUqzZ9XWhcqPUg==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 4.0.0" - } - }, "node_modules/update-browserslist-db": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.1.0.tgz", @@ -9111,17 +9085,6 @@ "punycode": "^2.1.0" } }, - "node_modules/url-parse": { - "version": "1.5.10", - "resolved": "https://registry.npmjs.org/url-parse/-/url-parse-1.5.10.tgz", - "integrity": "sha512-WypcfiRhfeUP9vvF0j6rw0J3hrWrw6iZv3+22h6iRMJ/8z1Tj6XfLP4DsUix5MhMPnXpiHDoKyoZ/bdCkwBCiQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "querystringify": "^2.1.1", - "requires-port": "^1.0.0" - } - }, "node_modules/use-callback-ref": { "version": "1.3.2", "resolved": "https://registry.npmjs.org/use-callback-ref/-/use-callback-ref-1.3.2.tgz", From 1e18e5c6613f3684994873b19c6ef41f1c616877 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 2 Oct 2024 04:13:57 +0000 Subject: [PATCH 50/74] Bump vite from 5.4.2 to 5.4.8 in /web/ui Bumps [vite](https://github.com/vitejs/vite/tree/HEAD/packages/vite) from 5.4.2 to 5.4.8. - [Release notes](https://github.com/vitejs/vite/releases) - [Changelog](https://github.com/vitejs/vite/blob/v5.4.8/packages/vite/CHANGELOG.md) - [Commits](https://github.com/vitejs/vite/commits/v5.4.8/packages/vite) --- updated-dependencies: - dependency-name: vite dependency-type: direct:development update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- web/ui/mantine-ui/package.json | 2 +- web/ui/package-lock.json | 13 ++++++------- web/ui/package.json | 2 +- 3 files changed, 8 insertions(+), 9 deletions(-) diff --git a/web/ui/mantine-ui/package.json b/web/ui/mantine-ui/package.json index e8244022b..728edf8b0 100644 --- a/web/ui/mantine-ui/package.json +++ b/web/ui/mantine-ui/package.json @@ -67,7 +67,7 @@ "postcss": "^8.4.35", "postcss-preset-mantine": "^1.17.0", "postcss-simple-vars": "^7.0.1", - "vite": "^5.1.0", + "vite": "^5.4.8", "vitest": "^2.1.1" } } diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index b580a3aa9..63f1064ad 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -19,7 +19,7 @@ "prettier": "^3.3.3", "ts-jest": "^29.2.2", "typescript": "^5.6.2", - "vite": "^5.1.0" + "vite": "^5.4.8" } }, "mantine-ui": { @@ -81,7 +81,7 @@ "postcss": "^8.4.35", "postcss-preset-mantine": "^1.17.0", "postcss-simple-vars": "^7.0.1", - "vite": "^5.1.0", + "vite": "^5.4.8", "vitest": "^2.1.1" } }, @@ -9261,14 +9261,13 @@ } }, "node_modules/vite": { - "version": "5.4.2", - "resolved": "https://registry.npmjs.org/vite/-/vite-5.4.2.tgz", - "integrity": "sha512-dDrQTRHp5C1fTFzcSaMxjk6vdpKvT+2/mIdE07Gw2ykehT49O0z/VHS3zZ8iV/Gh8BJJKHWOe5RjaNrW5xf/GA==", + "version": "5.4.8", + "resolved": "https://registry.npmjs.org/vite/-/vite-5.4.8.tgz", + "integrity": "sha512-FqrItQ4DT1NC4zCUqMB4c4AZORMKIa0m8/URVCZ77OZ/QSNeJ54bU1vrFADbDsuwfIPcgknRkmqakQcgnL4GiQ==", "dev": true, - "license": "MIT", "dependencies": { "esbuild": "^0.21.3", - "postcss": "^8.4.41", + "postcss": "^8.4.43", "rollup": "^4.20.0" }, "bin": { diff --git a/web/ui/package.json b/web/ui/package.json index 639ef70ab..c65e3109d 100644 --- a/web/ui/package.json +++ b/web/ui/package.json @@ -22,6 +22,6 @@ "prettier": "^3.3.3", "ts-jest": "^29.2.2", "typescript": "^5.6.2", - "vite": "^5.1.0" + "vite": "^5.4.8" } } From fc01573daa405a602530cb778e14d1c7f8007a9e Mon Sep 17 00:00:00 2001 From: Julius Volz 
Date: Wed, 2 Oct 2024 06:30:13 +0200 Subject: [PATCH 51/74] Allow blank issue reports again I frequently find myself in the situation where the standard bug issue template fields are all irrelevant for what I want to report, and then I have to first shoehorn my info into the template somehow, save the issue, edit it, and remove all the unnecessary parts. This demotivates me from filing casual issues, e.g. when I see a CI test fail. We should have a way of still filing custom issues without all the templatey bits. Signed-off-by: Julius Volz --- .github/ISSUE_TEMPLATE/config.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml index f4d17b359..bb4e2d24c 100644 --- a/.github/ISSUE_TEMPLATE/config.yml +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -1,4 +1,4 @@ -blank_issues_enabled: false +blank_issues_enabled: true contact_links: - name: Prometheus Community Support url: https://prometheus.io/community/ From 4782b75d114618789fd3992856f27b9d974711cf Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 2 Oct 2024 04:48:12 +0000 Subject: [PATCH 52/74] Bump @codemirror/autocomplete from 6.18.0 to 6.18.1 in /web/ui Bumps [@codemirror/autocomplete](https://github.com/codemirror/autocomplete) from 6.18.0 to 6.18.1. - [Changelog](https://github.com/codemirror/autocomplete/blob/main/CHANGELOG.md) - [Commits](https://github.com/codemirror/autocomplete/compare/6.18.0...6.18.1) --- updated-dependencies: - dependency-name: "@codemirror/autocomplete" dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- web/ui/mantine-ui/package.json | 2 +- web/ui/module/codemirror-promql/package.json | 2 +- web/ui/package-lock.json | 11 +++++------ 3 files changed, 7 insertions(+), 8 deletions(-) diff --git a/web/ui/mantine-ui/package.json b/web/ui/mantine-ui/package.json index 3cca4a2fc..9ee212a37 100644 --- a/web/ui/mantine-ui/package.json +++ b/web/ui/mantine-ui/package.json @@ -12,7 +12,7 @@ "test": "vitest" }, "dependencies": { - "@codemirror/autocomplete": "^6.18.0", + "@codemirror/autocomplete": "^6.18.1", "@codemirror/language": "^6.10.2", "@codemirror/lint": "^6.8.1", "@codemirror/state": "^6.4.1", diff --git a/web/ui/module/codemirror-promql/package.json b/web/ui/module/codemirror-promql/package.json index cf05c6f64..3e459e83a 100644 --- a/web/ui/module/codemirror-promql/package.json +++ b/web/ui/module/codemirror-promql/package.json @@ -33,7 +33,7 @@ "lru-cache": "^11.0.1" }, "devDependencies": { - "@codemirror/autocomplete": "^6.17.0", + "@codemirror/autocomplete": "^6.18.1", "@codemirror/language": "^6.10.2", "@codemirror/lint": "^6.8.1", "@codemirror/state": "^6.3.3", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index d9d867e52..c2a4860ae 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -26,7 +26,7 @@ "name": "@prometheus-io/mantine-ui", "version": "0.300.0-beta.0", "dependencies": { - "@codemirror/autocomplete": "^6.18.0", + "@codemirror/autocomplete": "^6.18.1", "@codemirror/language": "^6.10.2", "@codemirror/lint": "^6.8.1", "@codemirror/state": "^6.4.1", @@ -167,7 +167,7 @@ "lru-cache": "^11.0.1" }, "devDependencies": { - "@codemirror/autocomplete": "^6.17.0", + "@codemirror/autocomplete": "^6.18.1", "@codemirror/language": "^6.10.2", "@codemirror/lint": "^6.8.1", "@codemirror/state": "^6.3.3", @@ -913,10 +913,9 @@ "peer": true }, "node_modules/@codemirror/autocomplete": { - "version": "6.18.0", - "resolved": 
"https://registry.npmjs.org/@codemirror/autocomplete/-/autocomplete-6.18.0.tgz", - "integrity": "sha512-5DbOvBbY4qW5l57cjDsmmpDh3/TeK1vXfTHa+BUMrRzdWdcxKZ4U4V7vQaTtOpApNU4kLS4FQ6cINtLg245LXA==", - "license": "MIT", + "version": "6.18.1", + "resolved": "https://registry.npmjs.org/@codemirror/autocomplete/-/autocomplete-6.18.1.tgz", + "integrity": "sha512-iWHdj/B1ethnHRTwZj+C1obmmuCzquH29EbcKr0qIjA9NfDeBDJ7vs+WOHsFeLeflE4o+dHfYndJloMKHUkWUA==", "dependencies": { "@codemirror/language": "^6.0.0", "@codemirror/state": "^6.0.0", From ab808f6e646e901a4b7d6cd953fc161945d02d72 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 2 Oct 2024 05:07:01 +0000 Subject: [PATCH 53/74] Bump eslint from 9.9.1 to 9.11.1 in /web/ui Bumps [eslint](https://github.com/eslint/eslint) from 9.9.1 to 9.11.1. - [Release notes](https://github.com/eslint/eslint/releases) - [Changelog](https://github.com/eslint/eslint/blob/main/CHANGELOG.md) - [Commits](https://github.com/eslint/eslint/compare/v9.9.1...v9.11.1) --- updated-dependencies: - dependency-name: eslint dependency-type: direct:development update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- web/ui/mantine-ui/package.json | 2 +- web/ui/package-lock.json | 223 ++++++++++++++++----------------- 2 files changed, 108 insertions(+), 117 deletions(-) diff --git a/web/ui/mantine-ui/package.json b/web/ui/mantine-ui/package.json index 2487d27a2..8b60cbb37 100644 --- a/web/ui/mantine-ui/package.json +++ b/web/ui/mantine-ui/package.json @@ -59,7 +59,7 @@ "@typescript-eslint/eslint-plugin": "^6.21.0", "@typescript-eslint/parser": "^6.21.0", "@vitejs/plugin-react": "^4.2.1", - "eslint": "^9.9.1", + "eslint": "^9.11.1", "eslint-plugin-react-hooks": "^5.1.0-rc-e56f4ae3-20240830", "eslint-plugin-react-refresh": "^0.4.12", "globals": "^15.9.0", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index 8ca1c2b60..ac3d76283 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -73,7 +73,7 @@ "@typescript-eslint/eslint-plugin": "^6.21.0", "@typescript-eslint/parser": "^6.21.0", "@vitejs/plugin-react": "^4.2.1", - "eslint": "^9.9.1", + "eslint": "^9.11.1", "eslint-plugin-react-hooks": "^5.1.0-rc-e56f4ae3-20240830", "eslint-plugin-react-refresh": "^0.4.12", "globals": "^15.9.0", @@ -85,21 +85,30 @@ "vitest": "^2.1.1" } }, + "mantine-ui/node_modules/@types/estree": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.6.tgz", + "integrity": "sha512-AYnb1nQyY49te+VRAVgmzfcgjYS91mY5P0TKUDCLEM+gNnA+3T6rWITXRLYCpahpqSQbN5cE+gHpnPyXjHWxcw==", + "dev": true + }, "mantine-ui/node_modules/eslint": { - "version": "9.9.1", - "resolved": "https://registry.npmjs.org/eslint/-/eslint-9.9.1.tgz", - "integrity": "sha512-dHvhrbfr4xFQ9/dq+jcVneZMyRYLjggWjk6RVsIiHsP8Rz6yZ8LvZ//iU4TrZF+SXWG+JkNF2OyiZRvzgRDqMg==", + "version": "9.11.1", + "resolved": "https://registry.npmjs.org/eslint/-/eslint-9.11.1.tgz", + "integrity": "sha512-MobhYKIoAO1s1e4VUrgx1l1Sk2JBR/Gqjjgw8+mfgoLE2xwsHur4gdfTxyTgShrhvdVFTaJSgMiQBl1jv/AWxg==", "dev": true, - "license": "MIT", "dependencies": { 
"@eslint-community/eslint-utils": "^4.2.0", "@eslint-community/regexpp": "^4.11.0", "@eslint/config-array": "^0.18.0", + "@eslint/core": "^0.6.0", "@eslint/eslintrc": "^3.1.0", - "@eslint/js": "9.9.1", + "@eslint/js": "9.11.1", + "@eslint/plugin-kit": "^0.2.0", "@humanwhocodes/module-importer": "^1.0.1", "@humanwhocodes/retry": "^0.3.0", "@nodelib/fs.walk": "^1.2.8", + "@types/estree": "^1.0.6", + "@types/json-schema": "^7.0.15", "ajv": "^6.12.4", "chalk": "^4.0.0", "cross-spawn": "^7.0.2", @@ -119,7 +128,6 @@ "is-glob": "^4.0.0", "is-path-inside": "^3.0.3", "json-stable-stringify-without-jsonify": "^1.0.1", - "levn": "^0.4.1", "lodash.merge": "^4.6.2", "minimatch": "^3.1.2", "natural-compare": "^1.4.0", @@ -145,15 +153,6 @@ } } }, - "mantine-ui/node_modules/eslint/node_modules/@eslint/js": { - "version": "9.9.1", - "resolved": "https://registry.npmjs.org/@eslint/js/-/js-9.9.1.tgz", - "integrity": "sha512-xIDQRsfg5hNBqHz04H1R3scSVwmI+KUbqjsQKHKQ1DAUSaUjYPReZZmS/5PNiKu1fUvzDd6H7DEDKACSEhu+TQ==", - "dev": true, - "engines": { - "node": "^18.18.0 || ^20.9.0 || >=21.1.0" - } - }, "mantine-ui/node_modules/globals": { "version": "15.9.0", "resolved": "https://registry.npmjs.org/globals/-/globals-15.9.0.tgz", @@ -1460,7 +1459,6 @@ "resolved": "https://registry.npmjs.org/@eslint/config-array/-/config-array-0.18.0.tgz", "integrity": "sha512-fTxvnS1sRMu3+JjXwJG0j/i4RT9u4qJ+lqS/yCGap4lH4zZGzQ7tu+xZqQmcMZq5OBZDL4QRxQzRjkWcGt8IVw==", "dev": true, - "license": "Apache-2.0", "dependencies": { "@eslint/object-schema": "^2.1.4", "debug": "^4.3.1", @@ -1470,6 +1468,15 @@ "node": "^18.18.0 || ^20.9.0 || >=21.1.0" } }, + "node_modules/@eslint/core": { + "version": "0.6.0", + "resolved": "https://registry.npmjs.org/@eslint/core/-/core-0.6.0.tgz", + "integrity": "sha512-8I2Q8ykA4J0x0o7cg67FPVnehcqWTBehu/lmY+bolPFHGjh49YzGBMXTvpqVgEbBdvNCSxj6iFgiIyHzf03lzg==", + "dev": true, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + } + }, "node_modules/@eslint/eslintrc": { "version": 
"3.1.0", "resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-3.1.0.tgz", @@ -1508,7 +1515,18 @@ "resolved": "https://registry.npmjs.org/@eslint/object-schema/-/object-schema-2.1.4.tgz", "integrity": "sha512-BsWiH1yFGjXXS2yvrf5LyuoSIIbPrGUWob917o+BTKuZ7qJdxX8aJLRxs1fS9n6r7vESrq1OUqb68dANcFXuQQ==", "dev": true, - "license": "Apache-2.0", + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + } + }, + "node_modules/@eslint/plugin-kit": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/@eslint/plugin-kit/-/plugin-kit-0.2.0.tgz", + "integrity": "sha512-vH9PiIMMwvhCx31Af3HiGzsVNULDbyVkHXwlemn/B0TFj/00ho3y55efXrUZTfQipxoHC5u4xq6zblww1zm1Ig==", + "dev": true, + "dependencies": { + "levn": "^0.4.1" + }, "engines": { "node": "^18.18.0 || ^20.9.0 || >=21.1.0" } @@ -1567,15 +1585,14 @@ "license": "MIT" }, "node_modules/@humanwhocodes/config-array": { - "version": "0.11.14", - "resolved": "https://registry.npmjs.org/@humanwhocodes/config-array/-/config-array-0.11.14.tgz", - "integrity": "sha512-3T8LkOmg45BV5FICb15QQMsyUSWrQ8AygVfC7ZG32zOalnqrilm018ZVCw0eapXux8FtA33q8PSRSstjee3jSg==", + "version": "0.13.0", + "resolved": "https://registry.npmjs.org/@humanwhocodes/config-array/-/config-array-0.13.0.tgz", + "integrity": "sha512-DZLEEqFWQFiyK6h5YIeynKx7JlvCYWL0cImfSRXZ9l4Sg2efkFGTuFf6vzXjK1cq6IYkU+Eg/JizXw+TD2vRNw==", "deprecated": "Use @eslint/config-array instead", "dev": true, - "license": "Apache-2.0", "peer": true, "dependencies": { - "@humanwhocodes/object-schema": "^2.0.2", + "@humanwhocodes/object-schema": "^2.0.3", "debug": "^4.3.1", "minimatch": "^3.0.5" }, @@ -1603,7 +1620,6 @@ "integrity": "sha512-93zYdMES/c1D69yZiKDBj0V24vqNzB/koF26KPaagAfd3P/4gUlh3Dys5ogAK+Exi9QyzlD8x/08Zt7wIKcDcA==", "deprecated": "Use @eslint/object-schema instead", "dev": true, - "license": "BSD-3-Clause", "peer": true }, "node_modules/@humanwhocodes/retry": { @@ -1611,7 +1627,6 @@ "resolved": "https://registry.npmjs.org/@humanwhocodes/retry/-/retry-0.3.0.tgz", 
"integrity": "sha512-d2CGZR2o7fS6sWB7DG/3a95bGKQyHMACZ5aW8qGkkqQpUoZV6C0X7Pc7l4ZNMZkfNBf4VWNe9E1jRsf0G146Ew==", "dev": true, - "license": "Apache-2.0", "engines": { "node": ">=18.18" }, @@ -2964,8 +2979,7 @@ "version": "7.0.15", "resolved": "https://registry.npmjs.org/@types/json-schema/-/json-schema-7.0.15.tgz", "integrity": "sha512-5+fP8P8MFNC+AyZCDxrB2pkZFPGzqQWUzpSeuuVLvm8VMcorNYavBqoFcxK8bQz4Qsbn4oUEEem4wDLfcysGHA==", - "dev": true, - "license": "MIT" + "dev": true }, "node_modules/@types/lodash": { "version": "4.17.9", @@ -3029,8 +3043,7 @@ "version": "7.5.8", "resolved": "https://registry.npmjs.org/@types/semver/-/semver-7.5.8.tgz", "integrity": "sha512-I8EUhyrgfLrcTkzV3TSsGyl1tSuPrEDzr0yd5m90UgNxQkyDXULk3b6MlQqTCpZpNtWe1K0hzclnZkTcLBe2UQ==", - "dev": true, - "license": "MIT" + "dev": true }, "node_modules/@types/stack-utils": { "version": "2.0.3", @@ -3098,6 +3111,58 @@ } } }, + "node_modules/@typescript-eslint/eslint-plugin/node_modules/@typescript-eslint/type-utils": { + "version": "6.21.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/type-utils/-/type-utils-6.21.0.tgz", + "integrity": "sha512-rZQI7wHfao8qMX3Rd3xqeYSMCL3SoiSQLBATSiVKARdFGCYSRvmViieZjqc58jKgs8Y8i9YvVVhRbHSTA4VBag==", + "dev": true, + "dependencies": { + "@typescript-eslint/typescript-estree": "6.21.0", + "@typescript-eslint/utils": "6.21.0", + "debug": "^4.3.4", + "ts-api-utils": "^1.0.1" + }, + "engines": { + "node": "^16.0.0 || >=18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^7.0.0 || ^8.0.0" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/@typescript-eslint/eslint-plugin/node_modules/@typescript-eslint/utils": { + "version": "6.21.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-6.21.0.tgz", + "integrity": 
"sha512-NfWVaC8HP9T8cbKQxHcsJBY5YE1O33+jpMwN45qzWWaPDZgLIbo12toGMWnmhvCpd3sIxkpDw3Wv1B3dYrbDQQ==", + "dev": true, + "dependencies": { + "@eslint-community/eslint-utils": "^4.4.0", + "@types/json-schema": "^7.0.12", + "@types/semver": "^7.5.0", + "@typescript-eslint/scope-manager": "6.21.0", + "@typescript-eslint/types": "6.21.0", + "@typescript-eslint/typescript-estree": "6.21.0", + "semver": "^7.5.4" + }, + "engines": { + "node": "^16.0.0 || >=18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^7.0.0 || ^8.0.0" + } + }, "node_modules/@typescript-eslint/parser": { "version": "6.21.0", "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-6.21.0.tgz", @@ -3145,34 +3210,6 @@ "url": "https://opencollective.com/typescript-eslint" } }, - "node_modules/@typescript-eslint/type-utils": { - "version": "6.21.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/type-utils/-/type-utils-6.21.0.tgz", - "integrity": "sha512-rZQI7wHfao8qMX3Rd3xqeYSMCL3SoiSQLBATSiVKARdFGCYSRvmViieZjqc58jKgs8Y8i9YvVVhRbHSTA4VBag==", - "dev": true, - "license": "MIT", - "dependencies": { - "@typescript-eslint/typescript-estree": "6.21.0", - "@typescript-eslint/utils": "6.21.0", - "debug": "^4.3.4", - "ts-api-utils": "^1.0.1" - }, - "engines": { - "node": "^16.0.0 || >=18.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - }, - "peerDependencies": { - "eslint": "^7.0.0 || ^8.0.0" - }, - "peerDependenciesMeta": { - "typescript": { - "optional": true - } - } - }, "node_modules/@typescript-eslint/types": { "version": "6.21.0", "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-6.21.0.tgz", @@ -3242,32 +3279,6 @@ "url": "https://github.com/sponsors/isaacs" } }, - "node_modules/@typescript-eslint/utils": { - "version": "6.21.0", - "resolved": 
"https://registry.npmjs.org/@typescript-eslint/utils/-/utils-6.21.0.tgz", - "integrity": "sha512-NfWVaC8HP9T8cbKQxHcsJBY5YE1O33+jpMwN45qzWWaPDZgLIbo12toGMWnmhvCpd3sIxkpDw3Wv1B3dYrbDQQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "@eslint-community/eslint-utils": "^4.4.0", - "@types/json-schema": "^7.0.12", - "@types/semver": "^7.5.0", - "@typescript-eslint/scope-manager": "6.21.0", - "@typescript-eslint/types": "6.21.0", - "@typescript-eslint/typescript-estree": "6.21.0", - "semver": "^7.5.4" - }, - "engines": { - "node": "^16.0.0 || >=18.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - }, - "peerDependencies": { - "eslint": "^7.0.0 || ^8.0.0" - } - }, "node_modules/@typescript-eslint/visitor-keys": { "version": "6.21.0", "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-6.21.0.tgz", @@ -3355,7 +3366,6 @@ "resolved": "https://registry.npmjs.org/@ungap/structured-clone/-/structured-clone-1.2.0.tgz", "integrity": "sha512-zuVdFrMJiuCDQUMCzQaD6KL28MjnqqN8XnAqiEq9PNm/hCPTSGfrXCOfwj1ow4LFb/tNymJPwsNbVePc1xFqrQ==", "dev": true, - "license": "ISC", "peer": true }, "node_modules/@vitejs/plugin-react": { @@ -4372,7 +4382,6 @@ "resolved": "https://registry.npmjs.org/doctrine/-/doctrine-3.0.0.tgz", "integrity": "sha512-yS+Q5i3hBf7GBkd4KG8a7eBNNWNGLTaEwwYWUijIYM7zrlYDM0BFXHjjPWlWZ1Rg7UaddZeIDmi9jF3HmqiQ2w==", "dev": true, - "license": "Apache-2.0", "peer": true, "dependencies": { "esutils": "^2.0.2" @@ -4583,18 +4592,17 @@ } }, "node_modules/eslint": { - "version": "8.57.0", - "resolved": "https://registry.npmjs.org/eslint/-/eslint-8.57.0.tgz", - "integrity": "sha512-dZ6+mexnaTIbSBZWgou51U6OmzIhYM2VcNdtiTtI7qPNZm35Akpr0f6vtw3w1Kmn5PYo+tZVfh13WrhpS6oLqQ==", + "version": "8.57.1", + "resolved": "https://registry.npmjs.org/eslint/-/eslint-8.57.1.tgz", + "integrity": "sha512-ypowyDxpVSYpkXr9WPv2PAZCtNip1Mv5KTW0SCurXv/9iOpcrH9PaqUElksqEB6pChqHGDRCFTyrZlGhnLNGiA==", 
"dev": true, - "license": "MIT", "peer": true, "dependencies": { "@eslint-community/eslint-utils": "^4.2.0", "@eslint-community/regexpp": "^4.6.1", "@eslint/eslintrc": "^2.1.4", - "@eslint/js": "8.57.0", - "@humanwhocodes/config-array": "^0.11.14", + "@eslint/js": "8.57.1", + "@humanwhocodes/config-array": "^0.13.0", "@humanwhocodes/module-importer": "^1.0.1", "@nodelib/fs.walk": "^1.2.8", "@ungap/structured-clone": "^1.2.0", @@ -4706,11 +4714,10 @@ } }, "node_modules/eslint-scope": { - "version": "8.0.2", - "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-8.0.2.tgz", - "integrity": "sha512-6E4xmrTw5wtxnLA5wYL3WDfhZ/1bUBGOXV0zQvVRDOtrR8D0p6W7fs3JweNYhwRYeGvd/1CKX2se0/2s7Q/nJA==", + "version": "8.1.0", + "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-8.1.0.tgz", + "integrity": "sha512-14dSvlhaVhKKsa9Fx1l8A17s7ah7Ef7wCakJ10LYk6+GYmP9yDti2oq2SEwcyndt6knfcZyhyxwY3i9yL78EQw==", "dev": true, - "license": "BSD-2-Clause", "dependencies": { "esrecurse": "^4.3.0", "estraverse": "^5.2.0" @@ -4740,7 +4747,6 @@ "resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-2.1.4.tgz", "integrity": "sha512-269Z39MS6wVJtsoUl10L60WdkhJVdPG24Q4eZTH3nnF6lpvSShEK3wQjDX9JRWAUPvPh7COouPpU9IrqaZFvtQ==", "dev": true, - "license": "MIT", "peer": true, "dependencies": { "ajv": "^6.12.4", @@ -4761,11 +4767,10 @@ } }, "node_modules/eslint/node_modules/@eslint/js": { - "version": "8.57.0", - "resolved": "https://registry.npmjs.org/@eslint/js/-/js-8.57.0.tgz", - "integrity": "sha512-Ys+3g2TaW7gADOJzPt83SJtCDhMjndcDMFVQ/Tj9iA1BfJzFKD9mAUXT3OenpuPHbI6P/myECxRJrofUsDx/5g==", + "version": "8.57.1", + "resolved": "https://registry.npmjs.org/@eslint/js/-/js-8.57.1.tgz", + "integrity": "sha512-d9zaMRSTIKDLhctzH12MtXvJKSSUhaHcjV+2Z+GK+EEY7XKpP5yR4x+N3TAcHTcu963nIr+TMcCb4DBCYX1z6Q==", "dev": true, - "license": "MIT", "peer": true, "engines": { "node": "^12.22.0 || ^14.17.0 || >=16.0.0" @@ -4776,7 +4781,6 @@ "resolved": 
"https://registry.npmjs.org/eslint-scope/-/eslint-scope-7.2.2.tgz", "integrity": "sha512-dOt21O7lTMhDM+X9mB4GX+DZrZtCUJPL/wlcTqxyrx5IvO0IYtILdtrQGQp+8n5S0gwSVmOf9NQrjMOgfQZlIg==", "dev": true, - "license": "BSD-2-Clause", "peer": true, "dependencies": { "esrecurse": "^4.3.0", @@ -4794,7 +4798,6 @@ "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-3.4.3.tgz", "integrity": "sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag==", "dev": true, - "license": "Apache-2.0", "peer": true, "engines": { "node": "^12.22.0 || ^14.17.0 || >=16.0.0" @@ -4808,7 +4811,6 @@ "resolved": "https://registry.npmjs.org/espree/-/espree-9.6.1.tgz", "integrity": "sha512-oruZaFkjorTpF32kDSI5/75ViwGeZginGGy2NoOSg3Q9bnwlnmDm4HLnkl0RE3n+njDXR037aY1+x58Z/zFdwQ==", "dev": true, - "license": "BSD-2-Clause", "peer": true, "dependencies": { "acorn": "^8.9.0", @@ -4827,7 +4829,6 @@ "resolved": "https://registry.npmjs.org/file-entry-cache/-/file-entry-cache-6.0.1.tgz", "integrity": "sha512-7Gps/XWymbLk2QLYK4NzpMOrYjMhdIxXuIvy2QBsLE6ljuodKvdkWs/cpyJJ3CVIVpH0Oi1Hvg1ovbMzLdFBBg==", "dev": true, - "license": "MIT", "peer": true, "dependencies": { "flat-cache": "^3.0.4" @@ -4841,7 +4842,6 @@ "resolved": "https://registry.npmjs.org/flat-cache/-/flat-cache-3.2.0.tgz", "integrity": "sha512-CYcENa+FtcUKLmhhqyctpclsq7QF38pKjZHsGNiSQF5r4FtoKDWabFDl3hzaEQMvT1LHEysw5twgLvpYYb4vbw==", "dev": true, - "license": "MIT", "peer": true, "dependencies": { "flatted": "^3.2.9", @@ -4857,7 +4857,6 @@ "resolved": "https://registry.npmjs.org/globals/-/globals-13.24.0.tgz", "integrity": "sha512-AhO5QUcj8llrbG09iWhPU2B204J1xnPeL8kQmVorSsy+Sjj1sk8gIyh6cUocGmH4L0UuhAJy+hJMRA4mgA4mFQ==", "dev": true, - "license": "MIT", "peer": true, "dependencies": { "type-fest": "^0.20.2" @@ -4874,7 +4873,6 @@ "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.20.2.tgz", "integrity": 
"sha512-Ne+eE4r0/iWnpAxD852z3A+N0Bt5RN//NjJwRd2VFHEmrywxf5vsZlh4R6lixl6B+wz/8d+maTSAkN1FIkI3LQ==", "dev": true, - "license": "(MIT OR CC0-1.0)", "peer": true, "engines": { "node": ">=10" @@ -4934,7 +4932,6 @@ "resolved": "https://registry.npmjs.org/esrecurse/-/esrecurse-4.3.0.tgz", "integrity": "sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==", "dev": true, - "license": "BSD-2-Clause", "dependencies": { "estraverse": "^5.2.0" }, @@ -5107,7 +5104,6 @@ "resolved": "https://registry.npmjs.org/file-entry-cache/-/file-entry-cache-8.0.0.tgz", "integrity": "sha512-XXTUwCvisa5oacNGRP9SfNtYBNAMi+RPwBFmblZEF7N7swHYQS6/Zfk7SRwx4D5j3CH211YNRco1DEMNVfZCnQ==", "dev": true, - "license": "MIT", "dependencies": { "flat-cache": "^4.0.0" }, @@ -5183,7 +5179,6 @@ "resolved": "https://registry.npmjs.org/flat-cache/-/flat-cache-4.0.1.tgz", "integrity": "sha512-f7ccFPK3SXFHpx15UIGyRJ/FJQctuKZ0zVuN3frBo4HnK3cay9VEW0R6yPYFHC0AgqhukPzKjq22t5DmAyqGyw==", "dev": true, - "license": "MIT", "dependencies": { "flatted": "^3.2.9", "keyv": "^4.5.4" @@ -5196,8 +5191,7 @@ "version": "3.3.1", "resolved": "https://registry.npmjs.org/flatted/-/flatted-3.3.1.tgz", "integrity": "sha512-X8cqMLLie7KsNUDSdzeN8FYK9rEt4Dt67OsG/DNGnYTSDBG4uFAJFBnUeiV+zCVAvwFy56IjM9sH51jVaEhNxw==", - "dev": true, - "license": "ISC" + "dev": true }, "node_modules/form-data": { "version": "4.0.0", @@ -6895,8 +6889,7 @@ "version": "3.0.1", "resolved": "https://registry.npmjs.org/json-buffer/-/json-buffer-3.0.1.tgz", "integrity": "sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ==", - "dev": true, - "license": "MIT" + "dev": true }, "node_modules/json-parse-even-better-errors": { "version": "2.3.1", @@ -6945,7 +6938,6 @@ "resolved": "https://registry.npmjs.org/keyv/-/keyv-4.5.4.tgz", "integrity": "sha512-oxVHkHR/EJf2CNXnWxRLW6mg7JyCCUcG0DtEGmL2ctUo1PNTin1PUil+r/+4r5MpVgC/fn1kjsx7mjSujKqIpw==", "dev": true, - "license": "MIT", "dependencies": 
{ "json-buffer": "3.0.1" } @@ -8310,7 +8302,6 @@ "integrity": "sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==", "deprecated": "Rimraf versions prior to v4 are no longer supported", "dev": true, - "license": "ISC", "peer": true, "dependencies": { "glob": "^7.1.3" From 98cf5942ae054cb5223b69087866e87bef5ee10a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 2 Oct 2024 05:07:46 +0000 Subject: [PATCH 54/74] Bump postcss from 8.4.44 to 8.4.47 in /web/ui Bumps [postcss](https://github.com/postcss/postcss) from 8.4.44 to 8.4.47. - [Release notes](https://github.com/postcss/postcss/releases) - [Changelog](https://github.com/postcss/postcss/blob/main/CHANGELOG.md) - [Commits](https://github.com/postcss/postcss/compare/8.4.44...8.4.47) --- updated-dependencies: - dependency-name: postcss dependency-type: direct:development update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- web/ui/mantine-ui/package.json | 2 +- web/ui/package-lock.json | 27 ++++++++++++--------------- 2 files changed, 13 insertions(+), 16 deletions(-) diff --git a/web/ui/mantine-ui/package.json b/web/ui/mantine-ui/package.json index 2487d27a2..e1079805f 100644 --- a/web/ui/mantine-ui/package.json +++ b/web/ui/mantine-ui/package.json @@ -64,7 +64,7 @@ "eslint-plugin-react-refresh": "^0.4.12", "globals": "^15.9.0", "jsdom": "^25.0.1", - "postcss": "^8.4.35", + "postcss": "^8.4.47", "postcss-preset-mantine": "^1.17.0", "postcss-simple-vars": "^7.0.1", "vite": "^5.4.8", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index 8ca1c2b60..7137da077 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -78,7 +78,7 @@ "eslint-plugin-react-refresh": "^0.4.12", "globals": "^15.9.0", "jsdom": "^25.0.1", - "postcss": "^8.4.35", + "postcss": "^8.4.47", "postcss-preset-mantine": "^1.17.0", "postcss-simple-vars": "^7.0.1", "vite": "^5.4.8", @@ -7555,10 
+7555,9 @@ } }, "node_modules/picocolors": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.0.1.tgz", - "integrity": "sha512-anP1Z8qwhkbmu7MFP5iTt+wQKXgwzf7zTyGlcdzabySa9vd0Xt392U0rVmz9poOaBj0uHJKyyo9/upk0HrEQew==", - "license": "ISC" + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.0.tgz", + "integrity": "sha512-TQ92mBOW0l3LeMeyLV6mzy/kWr8lkd/hp3mTg7wYK7zJhuBStmGMBG0BdeDZS/dZx1IukaX6Bk11zcln25o1Aw==" }, "node_modules/picomatch": { "version": "2.3.1", @@ -7659,9 +7658,9 @@ } }, "node_modules/postcss": { - "version": "8.4.44", - "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.44.tgz", - "integrity": "sha512-Aweb9unOEpQ3ezu4Q00DPvvM2ZTUitJdNKeP/+uQgr1IBIqu574IaZoURId7BKtWMREwzKa9OgzPzezWGPWFQw==", + "version": "8.4.47", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.47.tgz", + "integrity": "sha512-56rxCq7G/XfB4EkXq9Egn5GCqugWvDFjafDOThIdMBsI15iqPqR5r15TfSr1YPYeEI19YeaXMCbY6u88Y76GLQ==", "funding": [ { "type": "opencollective", @@ -7676,11 +7675,10 @@ "url": "https://github.com/sponsors/ai" } ], - "license": "MIT", "dependencies": { "nanoid": "^3.3.7", - "picocolors": "^1.0.1", - "source-map-js": "^1.2.0" + "picocolors": "^1.1.0", + "source-map-js": "^1.2.1" }, "engines": { "node": "^10 || ^12 || >=14" @@ -8519,10 +8517,9 @@ } }, "node_modules/source-map-js": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.0.tgz", - "integrity": "sha512-itJW8lvSA0TXEphiRoawsCksnlf8SyvmFzIhltqAHluXd88pkCd+cXJVHTDwdCr0IzwptSm035IHQktUu1QUMg==", - "license": "BSD-3-Clause", + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.1.tgz", + "integrity": "sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==", "engines": { "node": ">=0.10.0" } From c0a0520b5455deca7543342a843d6b29971056c8 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" 
<49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 2 Oct 2024 05:55:37 +0000 Subject: [PATCH 55/74] Bump globals from 15.9.0 to 15.10.0 in /web/ui Bumps [globals](https://github.com/sindresorhus/globals) from 15.9.0 to 15.10.0. - [Release notes](https://github.com/sindresorhus/globals/releases) - [Commits](https://github.com/sindresorhus/globals/compare/v15.9.0...v15.10.0) --- updated-dependencies: - dependency-name: globals dependency-type: direct:development update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- web/ui/mantine-ui/package.json | 2 +- web/ui/package-lock.json | 34 ++++++++++++++++------------------ 2 files changed, 17 insertions(+), 19 deletions(-) diff --git a/web/ui/mantine-ui/package.json b/web/ui/mantine-ui/package.json index 329933625..ee9f2f1e6 100644 --- a/web/ui/mantine-ui/package.json +++ b/web/ui/mantine-ui/package.json @@ -62,7 +62,7 @@ "eslint": "^9.11.1", "eslint-plugin-react-hooks": "^5.1.0-rc-e56f4ae3-20240830", "eslint-plugin-react-refresh": "^0.4.12", - "globals": "^15.9.0", + "globals": "^15.10.0", "jsdom": "^25.0.1", "postcss": "^8.4.35", "postcss-preset-mantine": "^1.17.0", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index f33f54562..5616a8381 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -76,7 +76,7 @@ "eslint": "^9.11.1", "eslint-plugin-react-hooks": "^5.1.0-rc-e56f4ae3-20240830", "eslint-plugin-react-refresh": "^0.4.12", - "globals": "^15.9.0", + "globals": "^15.10.0", "jsdom": "^25.0.1", "postcss": "^8.4.35", "postcss-preset-mantine": "^1.17.0", @@ -153,19 +153,6 @@ } } }, - "mantine-ui/node_modules/globals": { - "version": "15.9.0", - "resolved": "https://registry.npmjs.org/globals/-/globals-15.9.0.tgz", - "integrity": "sha512-SmSKyLLKFbSr6rptvP8izbyxJL4ILwqO9Jg23UA0sDlGlu58V59D1//I3vlc0KJphVdUR7vMjHIplYnzBxorQA==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=18" - }, - "funding": { - "url": 
"https://github.com/sponsors/sindresorhus" - } - }, "module/codemirror-promql": { "name": "@prometheus-io/codemirror-promql", "version": "0.300.0-beta.0", @@ -1500,6 +1487,18 @@ "url": "https://opencollective.com/eslint" } }, + "node_modules/@eslint/eslintrc/node_modules/globals": { + "version": "14.0.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-14.0.0.tgz", + "integrity": "sha512-oahGvuMGQlPw/ivIYBjVSrWAfWLBeku5tpPE2fOPLi+WHffIWbuh2tCjhyQhTBPMf5E9jDEH4FOmTYgYwbKwtQ==", + "dev": true, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, "node_modules/@eslint/js": { "version": "9.11.1", "resolved": "https://registry.npmjs.org/@eslint/js/-/js-9.11.1.tgz", @@ -5341,11 +5340,10 @@ } }, "node_modules/globals": { - "version": "14.0.0", - "resolved": "https://registry.npmjs.org/globals/-/globals-14.0.0.tgz", - "integrity": "sha512-oahGvuMGQlPw/ivIYBjVSrWAfWLBeku5tpPE2fOPLi+WHffIWbuh2tCjhyQhTBPMf5E9jDEH4FOmTYgYwbKwtQ==", + "version": "15.10.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-15.10.0.tgz", + "integrity": "sha512-tqFIbz83w4Y5TCbtgjZjApohbuh7K9BxGYFm7ifwDR240tvdb7P9x+/9VvUKlmkPoiknoJtanI8UOrqxS3a7lQ==", "dev": true, - "license": "MIT", "engines": { "node": ">=18" }, From 4f448d4c4ca9a47af1549fb38e24246e775693d8 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 2 Oct 2024 05:56:23 +0000 Subject: [PATCH 56/74] Bump @tabler/icons-react from 2.47.0 to 3.19.0 in /web/ui Bumps [@tabler/icons-react](https://github.com/tabler/tabler-icons/tree/HEAD/packages/icons-react) from 2.47.0 to 3.19.0. - [Release notes](https://github.com/tabler/tabler-icons/releases) - [Commits](https://github.com/tabler/tabler-icons/commits/v3.19.0/packages/icons-react) --- updated-dependencies: - dependency-name: "@tabler/icons-react" dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] --- web/ui/mantine-ui/package.json | 2 +- web/ui/package-lock.json | 21 +++++++++------------ 2 files changed, 10 insertions(+), 13 deletions(-) diff --git a/web/ui/mantine-ui/package.json b/web/ui/mantine-ui/package.json index 329933625..514f78d8c 100644 --- a/web/ui/mantine-ui/package.json +++ b/web/ui/mantine-ui/package.json @@ -30,7 +30,7 @@ "@nexucis/kvsearch": "^0.9.1", "@prometheus-io/codemirror-promql": "0.300.0-beta.0", "@reduxjs/toolkit": "^2.2.1", - "@tabler/icons-react": "^2.47.0", + "@tabler/icons-react": "^3.19.0", "@tanstack/react-query": "^5.59.0", "@testing-library/jest-dom": "^6.5.0", "@testing-library/react": "^16.0.1", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index f33f54562..11fbb57f0 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -44,7 +44,7 @@ "@nexucis/kvsearch": "^0.9.1", "@prometheus-io/codemirror-promql": "0.300.0-beta.0", "@reduxjs/toolkit": "^2.2.1", - "@tabler/icons-react": "^2.47.0", + "@tabler/icons-react": "^3.19.0", "@tanstack/react-query": "^5.59.0", "@testing-library/jest-dom": "^6.5.0", "@testing-library/react": "^16.0.1", @@ -2696,30 +2696,27 @@ } }, "node_modules/@tabler/icons": { - "version": "2.47.0", - "resolved": "https://registry.npmjs.org/@tabler/icons/-/icons-2.47.0.tgz", - "integrity": "sha512-4w5evLh+7FUUiA1GucvGj2ReX2TvOjEr4ejXdwL/bsjoSkof6r1gQmzqI+VHrE2CpJpB3al7bCTulOkFa/RcyA==", - "license": "MIT", + "version": "3.19.0", + "resolved": "https://registry.npmjs.org/@tabler/icons/-/icons-3.19.0.tgz", + "integrity": "sha512-A4WEWqpdbTfnpFEtwXqwAe9qf9sp1yRPvzppqAuwcoF0q5YInqB+JkJtSFToCyBpPVeLxJUxxkapLvt2qQgnag==", "funding": { "type": "github", "url": "https://github.com/sponsors/codecalm" } }, "node_modules/@tabler/icons-react": { - "version": "2.47.0", - "resolved": "https://registry.npmjs.org/@tabler/icons-react/-/icons-react-2.47.0.tgz", - "integrity": 
"sha512-iqly2FvCF/qUbgmvS8E40rVeYY7laltc5GUjRxQj59DuX0x/6CpKHTXt86YlI2whg4czvd/c8Ce8YR08uEku0g==", - "license": "MIT", + "version": "3.19.0", + "resolved": "https://registry.npmjs.org/@tabler/icons-react/-/icons-react-3.19.0.tgz", + "integrity": "sha512-AqEWGI0tQWgqo6ZjMO5yJ9sYT8oXLuAM/up0hN9iENS6IdtNZryKrkNSiMgpwweNTpl8wFFG/dAZ959S91A/uQ==", "dependencies": { - "@tabler/icons": "2.47.0", - "prop-types": "^15.7.2" + "@tabler/icons": "3.19.0" }, "funding": { "type": "github", "url": "https://github.com/sponsors/codecalm" }, "peerDependencies": { - "react": "^16.5.1 || ^17.0.0 || ^18.0.0" + "react": ">= 16" } }, "node_modules/@tanstack/query-core": { From d3b0ab453cc584fbec5bdf67586f782d8765f3db Mon Sep 17 00:00:00 2001 From: Julius Volz Date: Wed, 2 Oct 2024 08:05:20 +0200 Subject: [PATCH 57/74] Fix tabler icon props import after version bump Signed-off-by: Julius Volz --- web/ui/mantine-ui/src/components/InfoPageCard.tsx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/web/ui/mantine-ui/src/components/InfoPageCard.tsx b/web/ui/mantine-ui/src/components/InfoPageCard.tsx index 3d0817e6d..f6797133c 100644 --- a/web/ui/mantine-ui/src/components/InfoPageCard.tsx +++ b/web/ui/mantine-ui/src/components/InfoPageCard.tsx @@ -1,12 +1,12 @@ import { Card, Group } from "@mantine/core"; -import { TablerIconsProps } from "@tabler/icons-react"; +import { IconProps } from "@tabler/icons-react"; import { FC, ReactNode } from "react"; import { infoPageCardTitleIconStyle } from "../styles"; const InfoPageCard: FC<{ children: ReactNode; title?: string; - icon?: React.ComponentType; + icon?: React.ComponentType; }> = ({ children, title, icon: Icon }) => { return ( From 98cd80b2e224220d8a4511d21522bb70c9e834a8 Mon Sep 17 00:00:00 2001 From: Manik Rana Date: Wed, 2 Oct 2024 12:18:27 +0530 Subject: [PATCH 58/74] feat: add microbenchmarks for OM CT parsing (#14933) * test: benchmark OM CT parsing Signed-off-by: Manik Rana * refac: move OM ct benchmark to promparse_test 
Signed-off-by: Manik Rana * chore: stricter comparison Co-authored-by: Arthur Silva Sens Signed-off-by: Manik Rana * feat: use richer OM test data Signed-off-by: Manik Rana * refac: move parse-ct test outside of inner loop Signed-off-by: Manik Rana * refac: separate benchmarks for om and prom parsers Signed-off-by: Manik Rana * chore: remove unused code Signed-off-by: Manik Rana * chore: remove more unused code Signed-off-by: Manik Rana * refac: rename to BenchmarkOMParseCreatedTimestamp Co-authored-by: Bartlomiej Plotka Signed-off-by: Manik Rana --------- Signed-off-by: Manik Rana Signed-off-by: Manik Rana Co-authored-by: Arthur Silva Sens Co-authored-by: Bartlomiej Plotka --- model/textparse/omtestdata.txt | 64 ++++++++++++++++++++++++ model/textparse/openmetricsparse_test.go | 44 ++++++++++++++++ model/textparse/promparse_test.go | 2 +- 3 files changed, 109 insertions(+), 1 deletion(-) create mode 100644 model/textparse/omtestdata.txt diff --git a/model/textparse/omtestdata.txt b/model/textparse/omtestdata.txt new file mode 100644 index 000000000..0f5f78b8b --- /dev/null +++ b/model/textparse/omtestdata.txt @@ -0,0 +1,64 @@ +# HELP go_build_info Build information about the main Go module. +# TYPE go_build_info gauge +go_build_info{checksum="",path="",version=""} 1.0 +# HELP promhttp_metric_handler_errors Total number of internal errors encountered by the promhttp metric handler. +# TYPE promhttp_metric_handler_errors counter +promhttp_metric_handler_errors_total{cause="encoding"} 0.0 +promhttp_metric_handler_errors_created{cause="encoding"} 1.726839813016397e+09 +promhttp_metric_handler_errors_total{cause="gathering"} 0.0 +promhttp_metric_handler_errors_created{cause="gathering"} 1.726839813016395e+09 +# HELP rpc_durations_histogram_seconds RPC latency distributions. 
+# TYPE rpc_durations_histogram_seconds histogram +rpc_durations_histogram_seconds_bucket{le="-0.00099"} 0 +rpc_durations_histogram_seconds_bucket{le="-0.00089"} 0 +rpc_durations_histogram_seconds_bucket{le="-0.0007899999999999999"} 0 +rpc_durations_histogram_seconds_bucket{le="-0.0006899999999999999"} 0 +rpc_durations_histogram_seconds_bucket{le="-0.0005899999999999998"} 0 +rpc_durations_histogram_seconds_bucket{le="-0.0004899999999999998"} 0 +rpc_durations_histogram_seconds_bucket{le="-0.0003899999999999998"} 0 +rpc_durations_histogram_seconds_bucket{le="-0.0002899999999999998"} 3 # {dummyID="17783"} -0.0003825067330956884 1.7268398142239082e+09 +rpc_durations_histogram_seconds_bucket{le="-0.0001899999999999998"} 5 # {dummyID="84741"} -0.00020178290006788965 1.726839814829977e+09 +rpc_durations_histogram_seconds_bucket{le="-8.999999999999979e-05"} 5 +rpc_durations_histogram_seconds_bucket{le="1.0000000000000216e-05"} 8 # {dummyID="19206"} -4.6156147425468016e-05 1.7268398151337721e+09 +rpc_durations_histogram_seconds_bucket{le="0.00011000000000000022"} 9 # {dummyID="3974"} 9.528436760156754e-05 1.726839814526797e+09 +rpc_durations_histogram_seconds_bucket{le="0.00021000000000000023"} 11 # {dummyID="29640"} 0.00017459624183458996 1.7268398139220061e+09 +rpc_durations_histogram_seconds_bucket{le="0.0003100000000000002"} 15 # {dummyID="9818"} 0.0002791130914009552 1.7268398149821382e+09 +rpc_durations_histogram_seconds_bucket{le="0.0004100000000000002"} 15 +rpc_durations_histogram_seconds_bucket{le="0.0005100000000000003"} 15 +rpc_durations_histogram_seconds_bucket{le="0.0006100000000000003"} 15 +rpc_durations_histogram_seconds_bucket{le="0.0007100000000000003"} 15 +rpc_durations_histogram_seconds_bucket{le="0.0008100000000000004"} 15 +rpc_durations_histogram_seconds_bucket{le="0.0009100000000000004"} 15 +rpc_durations_histogram_seconds_bucket{le="+Inf"} 15 +rpc_durations_histogram_seconds_sum -8.452185437166741e-05 +rpc_durations_histogram_seconds_count 15 
+rpc_durations_histogram_seconds_created 1.726839813016302e+09 +# HELP rpc_durations_seconds RPC latency distributions. +# TYPE rpc_durations_seconds summary +rpc_durations_seconds{service="exponential",quantile="0.5"} 7.689368882420941e-07 +rpc_durations_seconds{service="exponential",quantile="0.9"} 1.6537614174305048e-06 +rpc_durations_seconds{service="exponential",quantile="0.99"} 2.0965499063061924e-06 +rpc_durations_seconds_sum{service="exponential"} 2.0318666372575776e-05 +rpc_durations_seconds_count{service="exponential"} 22 +rpc_durations_seconds_created{service="exponential"} 1.7268398130168908e+09 +rpc_durations_seconds{service="normal",quantile="0.5"} -5.066758674917046e-06 +rpc_durations_seconds{service="normal",quantile="0.9"} 0.0002935723711788224 +rpc_durations_seconds{service="normal",quantile="0.99"} 0.0003023094636293776 +rpc_durations_seconds_sum{service="normal"} -8.452185437166741e-05 +rpc_durations_seconds_count{service="normal"} 15 +rpc_durations_seconds_created{service="normal"} 1.726839813016714e+09 +rpc_durations_seconds{service="uniform",quantile="0.5"} 9.005014931474918e-05 +rpc_durations_seconds{service="uniform",quantile="0.9"} 0.00017801230208182325 +rpc_durations_seconds{service="uniform",quantile="0.99"} 0.00018641524538180192 +rpc_durations_seconds_sum{service="uniform"} 0.0011666095700533677 +rpc_durations_seconds_count{service="uniform"} 11 +rpc_durations_seconds_created{service="uniform"} 1.72683981301684e+09 +# HELP rpc_requests Total number of RPC requests received. 
+# TYPE rpc_requests counter +rpc_requests_total{service="exponential"} 22.0 +rpc_requests_created{service="exponential"} 1.726839813016893e+09 +rpc_requests_total{service="normal"} 15.0 +rpc_requests_created{service="normal"} 1.726839813016717e+09 +rpc_requests_total{service="uniform"} 11.0 +rpc_requests_created{service="uniform"} 1.7268398130168471e+09 +# EOF diff --git a/model/textparse/openmetricsparse_test.go b/model/textparse/openmetricsparse_test.go index cadaabc99..ce1261f5c 100644 --- a/model/textparse/openmetricsparse_test.go +++ b/model/textparse/openmetricsparse_test.go @@ -16,6 +16,7 @@ package textparse import ( "errors" "io" + "os" "testing" "github.com/prometheus/common/model" @@ -992,3 +993,46 @@ go_gc_duration_seconds_created`) require.Equal(t, "go_gc_duration_seconds", string(copyParser.l.b[copyParser.offsets[0]:copyParser.offsets[1]])) require.False(t, copyParser.skipCTSeries) } + +func BenchmarkOMParseCreatedTimestamp(b *testing.B) { + for parserName, parser := range map[string]func([]byte, *labels.SymbolTable) Parser{ + "openmetrics": func(b []byte, st *labels.SymbolTable) Parser { + return NewOpenMetricsParser(b, st) + }, + "openmetrics-skip-ct": func(b []byte, st *labels.SymbolTable) Parser { + return NewOpenMetricsParser(b, st, WithOMParserCTSeriesSkipped()) + }, + } { + f, err := os.Open("omtestdata.txt") + require.NoError(b, err) + defer f.Close() + + buf, err := io.ReadAll(f) + require.NoError(b, err) + + b.Run(parserName+"/parse-ct/"+"omtestdata.txt", func(b *testing.B) { + b.SetBytes(int64(len(buf) / promtestdataSampleCount)) + b.ReportAllocs() + b.ResetTimer() + + st := labels.NewSymbolTable() + for i := 0; i < b.N; i += promtestdataSampleCount { + p := parser(buf, st) + + Outer: + for i < b.N { + t, err := p.Next() + switch t { + case EntryInvalid: + if errors.Is(err, io.EOF) { + break Outer + } + b.Fatal(err) + case EntrySeries: + p.CreatedTimestamp() + } + } + } + }) + } +} diff --git a/model/textparse/promparse_test.go 
b/model/textparse/promparse_test.go index ce9daf53e..4520dfe9a 100644 --- a/model/textparse/promparse_test.go +++ b/model/textparse/promparse_test.go @@ -492,7 +492,7 @@ const ( promtestdataSampleCount = 410 ) -func BenchmarkParse(b *testing.B) { +func BenchmarkPromParse(b *testing.B) { for parserName, parser := range map[string]func([]byte, *labels.SymbolTable) Parser{ "prometheus": NewPromParser, "openmetrics": func(b []byte, st *labels.SymbolTable) Parser { From e99e7ca9cfa4f0eb2946f225d82aa9fdec835752 Mon Sep 17 00:00:00 2001 From: Jesus Vazquez Date: Wed, 2 Oct 2024 12:40:31 +0200 Subject: [PATCH 59/74] README: Update readme with API flag change for the otlp receiver (#15073) Signed-off-by: Jesus Vazquez --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7fbdadfa6..8874d254f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,6 +3,7 @@ ## unreleased * [CHANGE] `holt_winters` is now called `double_exponential_smoothing` and moves behind the [experimental-promql-functions feature flag](https://prometheus.io/docs/prometheus/latest/feature_flags/#experimental-promql-functions). #14930 +* [CHANGE] API: The OTLP receiver endpoint can now be enabled using `--web.enable-otlp-receiver` instead of `--enable-feature=otlp-write-receiver`. #14894 * [BUGFIX] PromQL: Only return "possible non-counter" annotation when `rate` returns points. 
#14910 ## 3.0.0-beta.0 / 2024-09-05 From f1c57a95ed7df70176934a38799354f692ac8963 Mon Sep 17 00:00:00 2001 From: Manik Rana Date: Wed, 2 Oct 2024 16:22:03 +0530 Subject: [PATCH 60/74] change: No longer ingest OM _created as timeseries if feature-flag 'enable-ct-zero-ingestion' is enabled; fixed OM text CT conversion bug (#14738) * chore: revert TypeRequiresCT to private Signed-off-by: Manik Rana * feat: init NewOpenMetricsParser with skipCT true Signed-off-by: Manik Rana * refac: allow opt-in to OM CT ingestion Signed-off-by: Manik Rana * chore: lint Signed-off-by: Manik Rana * chore: use textparse interface to set om options Signed-off-by: Manik Rana * fix: set skipOMSeries in test Signed-off-by: Manik Rana * chore: gofumpt Signed-off-by: Manik Rana * wip: add tests for OM CR parse Signed-off-by: Manik Rana * chore: merge ct tests Signed-off-by: Manik Rana * tests: add cases for OM text Signed-off-by: Manik Rana * fix: check correct test cases Signed-off-by: Manik Rana * chore: use both scrape protocols in config Signed-off-by: Manik Rana * fix: fix inputs and output tests for OM Signed-off-by: Manik Rana * chore: cleanup Signed-off-by: Manik Rana * refac: rename skipOMSeries to skipOMCTSeries Co-authored-by: Arthur Silva Sens Signed-off-by: Manik Rana * fix: finish refac Signed-off-by: Manik Rana * refac: move setup code outside test Signed-off-by: Manik Rana * tests: verify _created lines create new metric in certain cases Signed-off-by: Manik Rana * fix: post merge fixes Signed-off-by: Manik Rana * chore: lint Signed-off-by: Manik Rana * manager: Fixed CT OMText conversion bug; Refactored tests. 
Signed-off-by: bwplotka * chore: lint Signed-off-by: Manik Rana * chore: gofumpt Signed-off-by: Manik Rana * chore: imports Signed-off-by: Manik Rana --------- Signed-off-by: Manik Rana Signed-off-by: Manik Rana Signed-off-by: bwplotka Co-authored-by: Arthur Silva Sens Co-authored-by: bwplotka --- model/textparse/interface.go | 6 +- model/textparse/interface_test.go | 2 +- model/textparse/openmetricsparse.go | 5 +- model/textparse/openmetricsparse_test.go | 70 +++--- promql/fuzz.go | 2 +- scrape/manager_test.go | 304 ++++++++++++++--------- scrape/scrape.go | 2 +- scrape/scrape_test.go | 2 +- 8 files changed, 227 insertions(+), 166 deletions(-) diff --git a/model/textparse/interface.go b/model/textparse/interface.go index 0b5d9281e..7de88a486 100644 --- a/model/textparse/interface.go +++ b/model/textparse/interface.go @@ -80,7 +80,7 @@ type Parser interface { // // This function always returns a valid parser, but might additionally // return an error if the content type cannot be parsed. 
-func New(b []byte, contentType string, parseClassicHistograms bool, st *labels.SymbolTable) (Parser, error) { +func New(b []byte, contentType string, parseClassicHistograms, skipOMCTSeries bool, st *labels.SymbolTable) (Parser, error) { if contentType == "" { return NewPromParser(b, st), nil } @@ -91,7 +91,9 @@ func New(b []byte, contentType string, parseClassicHistograms bool, st *labels.S } switch mediaType { case "application/openmetrics-text": - return NewOpenMetricsParser(b, st), nil + return NewOpenMetricsParser(b, st, func(o *openMetricsParserOptions) { + o.SkipCTSeries = skipOMCTSeries + }), nil case "application/vnd.google.protobuf": return NewProtobufParser(b, parseClassicHistograms, st), nil default: diff --git a/model/textparse/interface_test.go b/model/textparse/interface_test.go index c64456562..970b96706 100644 --- a/model/textparse/interface_test.go +++ b/model/textparse/interface_test.go @@ -93,7 +93,7 @@ func TestNewParser(t *testing.T) { tt := tt // Copy to local variable before going parallel. t.Parallel() - p, err := New([]byte{}, tt.contentType, false, labels.NewSymbolTable()) + p, err := New([]byte{}, tt.contentType, false, false, labels.NewSymbolTable()) tt.validateParser(t, p) if tt.err == "" { require.NoError(t, err) diff --git a/model/textparse/openmetricsparse.go b/model/textparse/openmetricsparse.go index ea7607c3a..8ec1b62ff 100644 --- a/model/textparse/openmetricsparse.go +++ b/model/textparse/openmetricsparse.go @@ -297,7 +297,10 @@ func (p *OpenMetricsParser) CreatedTimestamp() *int64 { // CT line for a different series, for our series no CT. return nil } - ct := int64(peek.val) + + // All timestamps in OpenMetrics are Unix Epoch in seconds. Convert to milliseconds. 
+ // https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#timestamps + ct := int64(peek.val * 1000.0) return &ct } } diff --git a/model/textparse/openmetricsparse_test.go b/model/textparse/openmetricsparse_test.go index ce1261f5c..93033380b 100644 --- a/model/textparse/openmetricsparse_test.go +++ b/model/textparse/openmetricsparse_test.go @@ -70,23 +70,23 @@ testmetric{label="\"bar\""} 1 # HELP foo Counter with and without labels to certify CT is parsed for both cases # TYPE foo counter foo_total 17.0 1520879607.789 # {id="counter-test"} 5 -foo_created 1000 +foo_created 1520872607.123 foo_total{a="b"} 17.0 1520879607.789 # {id="counter-test"} 5 -foo_created{a="b"} 1000 +foo_created{a="b"} 1520872607.123 # HELP bar Summary with CT at the end, making sure we find CT even if it's multiple lines a far # TYPE bar summary bar_count 17.0 bar_sum 324789.3 bar{quantile="0.95"} 123.7 bar{quantile="0.99"} 150.0 -bar_created 1520430000 +bar_created 1520872607.123 # HELP baz Histogram with the same objective as above's summary # TYPE baz histogram baz_bucket{le="0.0"} 0 baz_bucket{le="+Inf"} 17 baz_count 17 baz_sum 324789.3 -baz_created 1520430000 +baz_created 1520872607.123 # HELP fizz_created Gauge which shouldn't be parsed as CT # TYPE fizz_created gauge fizz_created 17.0` @@ -251,14 +251,14 @@ fizz_created 17.0` lset: labels.FromStrings("__name__", "foo_total"), t: int64p(1520879607789), e: &exemplar.Exemplar{Labels: labels.FromStrings("id", "counter-test"), Value: 5}, - ct: int64p(1000), + ct: int64p(1520872607123), }, { m: `foo_total{a="b"}`, v: 17.0, lset: labels.FromStrings("__name__", "foo_total", "a", "b"), t: int64p(1520879607789), e: &exemplar.Exemplar{Labels: labels.FromStrings("id", "counter-test"), Value: 5}, - ct: int64p(1000), + ct: int64p(1520872607123), }, { m: "bar", help: "Summary with CT at the end, making sure we find CT even if it's multiple lines a far", @@ -269,22 +269,22 @@ fizz_created 17.0` m: "bar_count", v: 
17.0, lset: labels.FromStrings("__name__", "bar_count"), - ct: int64p(1520430000), + ct: int64p(1520872607123), }, { m: "bar_sum", v: 324789.3, lset: labels.FromStrings("__name__", "bar_sum"), - ct: int64p(1520430000), + ct: int64p(1520872607123), }, { m: `bar{quantile="0.95"}`, v: 123.7, lset: labels.FromStrings("__name__", "bar", "quantile", "0.95"), - ct: int64p(1520430000), + ct: int64p(1520872607123), }, { m: `bar{quantile="0.99"}`, v: 150.0, lset: labels.FromStrings("__name__", "bar", "quantile", "0.99"), - ct: int64p(1520430000), + ct: int64p(1520872607123), }, { m: "baz", help: "Histogram with the same objective as above's summary", @@ -295,22 +295,22 @@ fizz_created 17.0` m: `baz_bucket{le="0.0"}`, v: 0, lset: labels.FromStrings("__name__", "baz_bucket", "le", "0.0"), - ct: int64p(1520430000), + ct: int64p(1520872607123), }, { m: `baz_bucket{le="+Inf"}`, v: 17, lset: labels.FromStrings("__name__", "baz_bucket", "le", "+Inf"), - ct: int64p(1520430000), + ct: int64p(1520872607123), }, { m: `baz_count`, v: 17, lset: labels.FromStrings("__name__", "baz_count"), - ct: int64p(1520430000), + ct: int64p(1520872607123), }, { m: `baz_sum`, v: 324789.3, lset: labels.FromStrings("__name__", "baz_sum"), - ct: int64p(1520430000), + ct: int64p(1520872607123), }, { m: "fizz_created", help: "Gauge which shouldn't be parsed as CT", @@ -347,7 +347,7 @@ func TestUTF8OpenMetricsParse(t *testing.T) { # UNIT "go.gc_duration_seconds" seconds {"go.gc_duration_seconds",quantile="0"} 4.9351e-05 {"go.gc_duration_seconds",quantile="0.25"} 7.424100000000001e-05 -{"go.gc_duration_seconds_created"} 12313 +{"go.gc_duration_seconds_created"} 1520872607.123 {"go.gc_duration_seconds",quantile="0.5",a="b"} 8.3835e-05 {"http.status",q="0.9",a="b"} 8.3835e-05 {"http.status",q="0.9",a="b"} 8.3835e-05 @@ -371,12 +371,12 @@ func TestUTF8OpenMetricsParse(t *testing.T) { m: `{"go.gc_duration_seconds",quantile="0"}`, v: 4.9351e-05, lset: labels.FromStrings("__name__", "go.gc_duration_seconds", 
"quantile", "0"), - ct: int64p(12313), + ct: int64p(1520872607123), }, { m: `{"go.gc_duration_seconds",quantile="0.25"}`, v: 7.424100000000001e-05, lset: labels.FromStrings("__name__", "go.gc_duration_seconds", "quantile", "0.25"), - ct: int64p(12313), + ct: int64p(1520872607123), }, { m: `{"go.gc_duration_seconds",quantile="0.5",a="b"}`, v: 8.3835e-05, @@ -700,7 +700,7 @@ func TestOpenMetricsParseErrors(t *testing.T) { } for i, c := range cases { - p := NewOpenMetricsParser([]byte(c.input), labels.NewSymbolTable()) + p := NewOpenMetricsParser([]byte(c.input), labels.NewSymbolTable(), WithOMParserCTSeriesSkipped()) var err error for err == nil { _, err = p.Next() @@ -765,7 +765,7 @@ func TestOMNullByteHandling(t *testing.T) { } for i, c := range cases { - p := NewOpenMetricsParser([]byte(c.input), labels.NewSymbolTable()) + p := NewOpenMetricsParser([]byte(c.input), labels.NewSymbolTable(), WithOMParserCTSeriesSkipped()) var err error for err == nil { _, err = p.Next() @@ -788,12 +788,12 @@ func TestCTParseFailures(t *testing.T) { # TYPE something histogram something_count 17 something_sum 324789.3 -something_created 1520430001 +something_created 1520872607.123 something_bucket{le="0.0"} 0 something_bucket{le="+Inf"} 17 # HELP thing Histogram with _created as first line # TYPE thing histogram -thing_created 1520430002 +thing_created 1520872607.123 thing_count 17 thing_sum 324789.3 thing_bucket{le="0.0"} 0 @@ -802,12 +802,12 @@ thing_bucket{le="+Inf"} 17 # TYPE yum summary yum_count 17.0 yum_sum 324789.3 -yum_created 1520430003 +yum_created 1520872607.123 yum{quantile="0.95"} 123.7 yum{quantile="0.99"} 150.0 # HELP foobar Summary with _created as the first line # TYPE foobar summary -foobar_created 1520430004 +foobar_created 1520872607.123 foobar_count 17.0 foobar_sum 324789.3 foobar{quantile="0.95"} 123.7 @@ -836,19 +836,19 @@ foobar{quantile="0.99"} 150.0` isErr: false, }, { m: `something_count`, - ct: int64p(1520430001), + ct: int64p(1520872607123), isErr: false, 
}, { m: `something_sum`, - ct: int64p(1520430001), + ct: int64p(1520872607123), isErr: false, }, { m: `something_bucket{le="0.0"}`, - ct: int64p(1520430001), + ct: int64p(1520872607123), isErr: true, }, { m: `something_bucket{le="+Inf"}`, - ct: int64p(1520430001), + ct: int64p(1520872607123), isErr: true, }, { m: "thing", @@ -860,19 +860,19 @@ foobar{quantile="0.99"} 150.0` isErr: false, }, { m: `thing_count`, - ct: int64p(1520430002), + ct: int64p(1520872607123), isErr: true, }, { m: `thing_sum`, - ct: int64p(1520430002), + ct: int64p(1520872607123), isErr: true, }, { m: `thing_bucket{le="0.0"}`, - ct: int64p(1520430002), + ct: int64p(1520872607123), isErr: true, }, { m: `thing_bucket{le="+Inf"}`, - ct: int64p(1520430002), + ct: int64p(1520872607123), isErr: true, }, { m: "yum", @@ -884,19 +884,19 @@ foobar{quantile="0.99"} 150.0` isErr: false, }, { m: "yum_count", - ct: int64p(1520430003), + ct: int64p(1520872607123), isErr: false, }, { m: "yum_sum", - ct: int64p(1520430003), + ct: int64p(1520872607123), isErr: false, }, { m: `yum{quantile="0.95"}`, - ct: int64p(1520430003), + ct: int64p(1520872607123), isErr: true, }, { m: `yum{quantile="0.99"}`, - ct: int64p(1520430003), + ct: int64p(1520872607123), isErr: true, }, { m: "foobar", diff --git a/promql/fuzz.go b/promql/fuzz.go index 3fd50b949..57fd1166a 100644 --- a/promql/fuzz.go +++ b/promql/fuzz.go @@ -61,7 +61,7 @@ const ( var symbolTable = labels.NewSymbolTable() func fuzzParseMetricWithContentType(in []byte, contentType string) int { - p, warning := textparse.New(in, contentType, false, symbolTable) + p, warning := textparse.New(in, contentType, false, false, symbolTable) if warning != nil { // An invalid content type is being passed, which should not happen // in this context. 
diff --git a/scrape/manager_test.go b/scrape/manager_test.go index 7e01238cc..8d2c3c968 100644 --- a/scrape/manager_test.go +++ b/scrape/manager_test.go @@ -14,6 +14,7 @@ package scrape import ( + "bytes" "context" "fmt" "net/http" @@ -30,11 +31,14 @@ import ( "github.com/gogo/protobuf/proto" "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" + "github.com/prometheus/common/expfmt" "github.com/prometheus/common/model" "github.com/stretchr/testify/require" "google.golang.org/protobuf/types/known/timestamppb" "gopkg.in/yaml.v2" + "github.com/prometheus/prometheus/model/timestamp" + "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/discovery" _ "github.com/prometheus/prometheus/discovery/file" @@ -719,143 +723,195 @@ scrape_configs: require.ElementsMatch(t, []string{"job1", "job3"}, scrapeManager.ScrapePools()) } -// TestManagerCTZeroIngestion tests scrape manager for CT cases. -func TestManagerCTZeroIngestion(t *testing.T) { - const mName = "expected_counter" - - for _, tc := range []struct { - name string - counterSample *dto.Counter - enableCTZeroIngestion bool - }{ - { - name: "disabled with CT on counter", - counterSample: &dto.Counter{ - Value: proto.Float64(1.0), - // Timestamp does not matter as long as it exists in this test. - CreatedTimestamp: timestamppb.Now(), - }, +func setupScrapeManager(t *testing.T, honorTimestamps, enableCTZeroIngestion bool) (*collectResultAppender, *Manager) { + app := &collectResultAppender{} + scrapeManager, err := NewManager( + &Options{ + EnableCreatedTimestampZeroIngestion: enableCTZeroIngestion, + skipOffsetting: true, }, - { - name: "enabled with CT on counter", - counterSample: &dto.Counter{ - Value: proto.Float64(1.0), - // Timestamp does not matter as long as it exists in this test. 
- CreatedTimestamp: timestamppb.Now(), - }, - enableCTZeroIngestion: true, + log.NewLogfmtLogger(os.Stderr), + nil, + &collectResultAppendable{app}, + prometheus.NewRegistry(), + ) + require.NoError(t, err) + + require.NoError(t, scrapeManager.ApplyConfig(&config.Config{ + GlobalConfig: config.GlobalConfig{ + // Disable regular scrapes. + ScrapeInterval: model.Duration(9999 * time.Minute), + ScrapeTimeout: model.Duration(5 * time.Second), + ScrapeProtocols: []config.ScrapeProtocol{config.OpenMetricsText1_0_0, config.PrometheusProto}, }, - { - name: "enabled without CT on counter", - counterSample: &dto.Counter{ - Value: proto.Float64(1.0), - }, - enableCTZeroIngestion: true, - }, - } { - t.Run(tc.name, func(t *testing.T) { - app := &collectResultAppender{} - scrapeManager, err := NewManager( - &Options{ - EnableCreatedTimestampZeroIngestion: tc.enableCTZeroIngestion, - skipOffsetting: true, - }, - log.NewLogfmtLogger(os.Stderr), - nil, - &collectResultAppendable{app}, - prometheus.NewRegistry(), - ) - require.NoError(t, err) + ScrapeConfigs: []*config.ScrapeConfig{{JobName: "test", HonorTimestamps: honorTimestamps}}, + })) - require.NoError(t, scrapeManager.ApplyConfig(&config.Config{ - GlobalConfig: config.GlobalConfig{ - // Disable regular scrapes. - ScrapeInterval: model.Duration(9999 * time.Minute), - ScrapeTimeout: model.Duration(5 * time.Second), - // Ensure the proto is chosen. We need proto as it's the only protocol - // with the CT parsing support. - ScrapeProtocols: []config.ScrapeProtocol{config.PrometheusProto}, - }, - ScrapeConfigs: []*config.ScrapeConfig{{JobName: "test"}}, - })) + return app, scrapeManager +} - once := sync.Once{} - // Start fake HTTP target to that allow one scrape only. 
- server := httptest.NewServer( - http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - fail := true - once.Do(func() { - fail = false - w.Header().Set("Content-Type", `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited`) +func setupTestServer(t *testing.T, typ string, toWrite []byte) *httptest.Server { + once := sync.Once{} - ctrType := dto.MetricType_COUNTER - w.Write(protoMarshalDelimited(t, &dto.MetricFamily{ - Name: proto.String(mName), - Type: &ctrType, - Metric: []*dto.Metric{{Counter: tc.counterSample}}, - })) - }) - - if fail { - w.WriteHeader(http.StatusInternalServerError) - } - }), - ) - defer server.Close() - - serverURL, err := url.Parse(server.URL) - require.NoError(t, err) - - // Add fake target directly into tsets + reload. Normally users would use - // Manager.Run and wait for minimum 5s refresh interval. - scrapeManager.updateTsets(map[string][]*targetgroup.Group{ - "test": {{ - Targets: []model.LabelSet{{ - model.SchemeLabel: model.LabelValue(serverURL.Scheme), - model.AddressLabel: model.LabelValue(serverURL.Host), - }}, - }}, + server := httptest.NewServer( + http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fail := true + once.Do(func() { + fail = false + w.Header().Set("Content-Type", typ) + w.Write(toWrite) }) - scrapeManager.reload() - var got []float64 - // Wait for one scrape. - ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute) - defer cancel() - require.NoError(t, runutil.Retry(100*time.Millisecond, ctx.Done(), func() error { - app.mtx.Lock() - defer app.mtx.Unlock() - - // Check if scrape happened and grab the relevant samples, they have to be there - or it's a bug - // and it's not worth waiting. 
- for _, f := range app.resultFloats { - if f.metric.Get(model.MetricNameLabel) == mName { - got = append(got, f.f) - } - } - if len(app.resultFloats) > 0 { - return nil - } - return fmt.Errorf("expected some samples, got none") - }), "after 1 minute") - scrapeManager.Stop() - - // Check for zero samples, assuming we only injected always one sample. - // Did it contain CT to inject? If yes, was CT zero enabled? - if tc.counterSample.CreatedTimestamp.IsValid() && tc.enableCTZeroIngestion { - require.Len(t, got, 2) - require.Equal(t, 0.0, got[0]) - require.Equal(t, tc.counterSample.GetValue(), got[1]) - return + if fail { + w.WriteHeader(http.StatusInternalServerError) } + }), + ) - // Expect only one, valid sample. - require.Len(t, got, 1) - require.Equal(t, tc.counterSample.GetValue(), got[0]) + t.Cleanup(func() { server.Close() }) + + return server +} + +// TestManagerCTZeroIngestion tests scrape manager for various CT cases. +func TestManagerCTZeroIngestion(t *testing.T) { + const ( + // _total suffix is required, otherwise expfmt with OMText will mark metric as "unknown" + expectedMetricName = "expected_metric_total" + expectedCreatedMetricName = "expected_metric_created" + expectedSampleValue = 17.0 + ) + + for _, testFormat := range []config.ScrapeProtocol{config.PrometheusProto, config.OpenMetricsText1_0_0} { + t.Run(fmt.Sprintf("format=%s", testFormat), func(t *testing.T) { + for _, testWithCT := range []bool{false, true} { + t.Run(fmt.Sprintf("withCT=%v", testWithCT), func(t *testing.T) { + for _, testCTZeroIngest := range []bool{false, true} { + t.Run(fmt.Sprintf("ctZeroIngest=%v", testCTZeroIngest), func(t *testing.T) { + sampleTs := time.Now() + ctTs := time.Time{} + if testWithCT { + ctTs = sampleTs.Add(-2 * time.Minute) + } + + // TODO(bwplotka): Add more types than just counter? 
+ encoded := prepareTestEncodedCounter(t, testFormat, expectedMetricName, expectedSampleValue, sampleTs, ctTs)
+ app, scrapeManager := setupScrapeManager(t, true, testCTZeroIngest)
+
+ // Perform the test.
+ doOneScrape(t, scrapeManager, app, setupTestServer(t, config.ScrapeProtocolsHeaders[testFormat], encoded))
+
+ // Verify results.
+ // Verify what we got vs expectations around CT injection.
+ samples := findSamplesForMetric(app.resultFloats, expectedMetricName)
+ if testWithCT && testCTZeroIngest {
+ require.Len(t, samples, 2)
+ require.Equal(t, 0.0, samples[0].f)
+ require.Equal(t, timestamp.FromTime(ctTs), samples[0].t)
+ require.Equal(t, expectedSampleValue, samples[1].f)
+ require.Equal(t, timestamp.FromTime(sampleTs), samples[1].t)
+ } else {
+ require.Len(t, samples, 1)
+ require.Equal(t, expectedSampleValue, samples[0].f)
+ require.Equal(t, timestamp.FromTime(sampleTs), samples[0].t)
+ }
+
+ // Verify what we got vs expectations around additional _created series for OM text.
+ // enableCTZeroIngest also kills that _created line.
+ createdSeriesSamples := findSamplesForMetric(app.resultFloats, expectedCreatedMetricName)
+ if testFormat == config.OpenMetricsText1_0_0 && testWithCT && !testCTZeroIngest {
+ // For OM Text, when the counter has a CT and the feature flag is disabled, we should see _created lines.
+ require.Len(t, createdSeriesSamples, 1)
+ // Conversion taken from common/expfmt.writeOpenMetricsFloat. 
+ // We don't check the ct timestamp as explicit ts was not implemented in expfmt.Encoder, + // but exists in OM https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#:~:text=An%20example%20with%20a%20Metric%20with%20no%20labels%2C%20and%20a%20MetricPoint%20with%20a%20timestamp%20and%20a%20created + // We can implement this, but we want to potentially get rid of OM 1.0 CT lines + require.Equal(t, float64(timestamppb.New(ctTs).AsTime().UnixNano())/1e9, createdSeriesSamples[0].f) + } else { + require.Empty(t, createdSeriesSamples) + } + }) + } + }) + } }) } } +func prepareTestEncodedCounter(t *testing.T, format config.ScrapeProtocol, mName string, v float64, ts, ct time.Time) (encoded []byte) { + t.Helper() + + counter := &dto.Counter{Value: proto.Float64(v)} + if !ct.IsZero() { + counter.CreatedTimestamp = timestamppb.New(ct) + } + ctrType := dto.MetricType_COUNTER + inputMetric := &dto.MetricFamily{ + Name: proto.String(mName), + Type: &ctrType, + Metric: []*dto.Metric{{ + TimestampMs: proto.Int64(timestamp.FromTime(ts)), + Counter: counter, + }}, + } + switch format { + case config.PrometheusProto: + return protoMarshalDelimited(t, inputMetric) + case config.OpenMetricsText1_0_0: + buf := &bytes.Buffer{} + require.NoError(t, expfmt.NewEncoder(buf, expfmt.NewFormat(expfmt.TypeOpenMetrics), expfmt.WithCreatedLines(), expfmt.WithUnit()).Encode(inputMetric)) + _, _ = buf.WriteString("# EOF") + + t.Log("produced OM text to expose:", buf.String()) + return buf.Bytes() + default: + t.Fatalf("not implemented format: %v", format) + return nil + } +} + +func doOneScrape(t *testing.T, manager *Manager, appender *collectResultAppender, server *httptest.Server) { + t.Helper() + + serverURL, err := url.Parse(server.URL) + require.NoError(t, err) + + // Add fake target directly into tsets + reload + manager.updateTsets(map[string][]*targetgroup.Group{ + "test": {{ + Targets: []model.LabelSet{{ + model.SchemeLabel: 
model.LabelValue(serverURL.Scheme), + model.AddressLabel: model.LabelValue(serverURL.Host), + }}, + }}, + }) + manager.reload() + + // Wait for one scrape. + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute) + defer cancel() + require.NoError(t, runutil.Retry(100*time.Millisecond, ctx.Done(), func() error { + appender.mtx.Lock() + defer appender.mtx.Unlock() + + // Check if scrape happened and grab the relevant samples. + if len(appender.resultFloats) > 0 { + return nil + } + return fmt.Errorf("expected some float samples, got none") + }), "after 1 minute") + manager.Stop() +} + +func findSamplesForMetric(floats []floatSample, metricName string) (ret []floatSample) { + for _, f := range floats { + if f.metric.Get(model.MetricNameLabel) == metricName { + ret = append(ret, f) + } + } + return ret +} + // generateTestHistogram generates the same thing as tsdbutil.GenerateTestHistogram, // but in the form of dto.Histogram. func generateTestHistogram(i int) *dto.Histogram { diff --git a/scrape/scrape.go b/scrape/scrape.go index 071edfca5..c66f203dd 100644 --- a/scrape/scrape.go +++ b/scrape/scrape.go @@ -1536,7 +1536,7 @@ type appendErrors struct { } func (sl *scrapeLoop) append(app storage.Appender, b []byte, contentType string, ts time.Time) (total, added, seriesAdded int, err error) { - p, err := textparse.New(b, contentType, sl.scrapeClassicHistograms, sl.symbolTable) + p, err := textparse.New(b, contentType, sl.scrapeClassicHistograms, sl.enableCTZeroIngestion, sl.symbolTable) if err != nil { level.Debug(sl.l).Log( "msg", "Invalid content type on scrape, using prometheus parser as fallback.", diff --git a/scrape/scrape_test.go b/scrape/scrape_test.go index 04fd53601..57c51b2e9 100644 --- a/scrape/scrape_test.go +++ b/scrape/scrape_test.go @@ -1525,7 +1525,7 @@ func TestScrapeLoopAppendCacheEntryButErrNotFound(t *testing.T) { fakeRef := storage.SeriesRef(1) expValue := float64(1) metric := []byte(`metric{n="1"} 1`) - p, warning := 
textparse.New(metric, "", false, labels.NewSymbolTable()) + p, warning := textparse.New(metric, "", false, false, labels.NewSymbolTable()) require.NoError(t, warning) var lset labels.Labels From b5479831b8a5e2723759c93bf1fa305d96ed53de Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gy=C3=B6rgy=20Krajcsovits?= Date: Tue, 1 Oct 2024 15:46:49 +0200 Subject: [PATCH 61/74] Unit test for regression in rate vs float and histogram mixup MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: György Krajcsovits --- promql/engine_test.go | 115 +++++++++++++++++++++++++++++++++++++++++ storage/buffer_test.go | 50 ++++++++++++++++++ storage/series.go | 28 ++++++++++ 3 files changed, 193 insertions(+) diff --git a/promql/engine_test.go b/promql/engine_test.go index e4171eb5b..19bd78144 100644 --- a/promql/engine_test.go +++ b/promql/engine_test.go @@ -17,6 +17,7 @@ import ( "context" "errors" "fmt" + "math" "sort" "strconv" "strings" @@ -29,11 +30,13 @@ import ( "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/timestamp" + "github.com/prometheus/prometheus/model/value" "github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/promql/parser" "github.com/prometheus/prometheus/promql/parser/posrange" "github.com/prometheus/prometheus/promql/promqltest" "github.com/prometheus/prometheus/storage" + "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/util/annotations" "github.com/prometheus/prometheus/util/stats" "github.com/prometheus/prometheus/util/teststorage" @@ -3781,3 +3784,115 @@ func TestRateAnnotations(t *testing.T) { }) } } + +func TestHistogramRateWithFloatStaleness(t *testing.T) { + // Make a chunk with two normal histograms of the same value. 
+ h1 := histogram.Histogram{ + Schema: 2, + Count: 10, + Sum: 100, + PositiveSpans: []histogram.Span{{Offset: 0, Length: 1}}, + PositiveBuckets: []int64{100}, + } + + c1 := chunkenc.NewHistogramChunk() + app, err := c1.Appender() + require.NoError(t, err) + var ( + newc chunkenc.Chunk + recoded bool + ) + + newc, recoded, app, err = app.AppendHistogram(nil, 0, h1.Copy(), false) + require.NoError(t, err) + require.False(t, recoded) + require.Nil(t, newc) + + newc, recoded, _, err = app.AppendHistogram(nil, 10, h1.Copy(), false) + require.NoError(t, err) + require.False(t, recoded) + require.Nil(t, newc) + + // Make a chunk with a single float stale marker. + c2 := chunkenc.NewXORChunk() + app, err = c2.Appender() + require.NoError(t, err) + + app.Append(20, math.Float64frombits(value.StaleNaN)) + + // Make a chunk with two normal histograms that have zero value. + h2 := histogram.Histogram{ + Schema: 2, + } + + c3 := chunkenc.NewHistogramChunk() + app, err = c3.Appender() + require.NoError(t, err) + + newc, recoded, app, err = app.AppendHistogram(nil, 30, h2.Copy(), false) + require.NoError(t, err) + require.False(t, recoded) + require.Nil(t, newc) + + newc, recoded, _, err = app.AppendHistogram(nil, 40, h2.Copy(), false) + require.NoError(t, err) + require.False(t, recoded) + require.Nil(t, newc) + + querier := storage.MockQuerier{ + SelectMockFunction: func(_ bool, _ *storage.SelectHints, _ ...*labels.Matcher) storage.SeriesSet { + return &singleSeriesSet{ + series: mockSeries{chunks: []chunkenc.Chunk{c1, c2, c3}, labelSet: []string{"__name__", "foo"}}, + } + }, + } + + queriable := storage.MockQueryable{MockQuerier: &querier} + + engine := promqltest.NewTestEngine(t, false, 0, promqltest.DefaultMaxSamplesPerQuery) + + q, err := engine.NewInstantQuery(context.Background(), &queriable, nil, "rate(foo[40s])", timestamp.Time(45)) + require.NoError(t, err) + defer q.Close() + + res := q.Exec(context.Background()) + require.NoError(t, res.Err) + + vec, err := 
res.Vector() + require.NoError(t, err) + + // Single sample result. + require.Len(t, vec, 1) + // The result is a histogram. + require.NotNil(t, vec[0].H) + // The result should be zero as the histogram has not increased, so the rate is zero. + require.Equal(t, 0.0, vec[0].H.Count) + require.Equal(t, 0.0, vec[0].H.Sum) +} + +type singleSeriesSet struct { + series storage.Series + consumed bool +} + +func (s *singleSeriesSet) Next() bool { c := s.consumed; s.consumed = true; return !c } +func (s singleSeriesSet) At() storage.Series { return s.series } +func (s singleSeriesSet) Err() error { return nil } +func (s singleSeriesSet) Warnings() annotations.Annotations { return nil } + +type mockSeries struct { + chunks []chunkenc.Chunk + labelSet []string +} + +func (s mockSeries) Labels() labels.Labels { + return labels.FromStrings(s.labelSet...) +} + +func (s mockSeries) Iterator(it chunkenc.Iterator) chunkenc.Iterator { + iterables := []chunkenc.Iterator{} + for _, c := range s.chunks { + iterables = append(iterables, c.Iterator(nil)) + } + return storage.ChainSampleIteratorFromIterators(it, iterables) +} diff --git a/storage/buffer_test.go b/storage/buffer_test.go index b5c6443ac..6e8e83db8 100644 --- a/storage/buffer_test.go +++ b/storage/buffer_test.go @@ -314,6 +314,56 @@ func TestBufferedSeriesIteratorMixedHistograms(t *testing.T) { require.Equal(t, histograms[1].ToFloat(nil), fh) } +func TestBufferedSeriesIteratorMixedFloatsAndHistograms(t *testing.T) { + histograms := tsdbutil.GenerateTestHistograms(5) + + it := NewBufferIterator(NewListSeriesIteratorWithCopy(samples{ + hSample{t: 1, h: histograms[0].Copy()}, + fSample{t: 2, f: 2}, + hSample{t: 3, h: histograms[1].Copy()}, + hSample{t: 4, h: histograms[2].Copy()}, + fhSample{t: 3, fh: histograms[3].ToFloat(nil)}, + fhSample{t: 4, fh: histograms[4].ToFloat(nil)}, + }), 6) + + require.Equal(t, chunkenc.ValNone, it.Seek(7)) + require.NoError(t, it.Err()) + + buf := it.Buffer() + + require.Equal(t, 
chunkenc.ValHistogram, buf.Next()) + _, h0 := buf.AtHistogram() + require.Equal(t, histograms[0], h0) + + require.Equal(t, chunkenc.ValFloat, buf.Next()) + _, v := buf.At() + require.Equal(t, 2.0, v) + + require.Equal(t, chunkenc.ValHistogram, buf.Next()) + _, h1 := buf.AtHistogram() + require.Equal(t, histograms[1], h1) + + require.Equal(t, chunkenc.ValHistogram, buf.Next()) + _, h2 := buf.AtHistogram() + require.Equal(t, histograms[2], h2) + + require.Equal(t, chunkenc.ValFloatHistogram, buf.Next()) + _, h3 := buf.AtFloatHistogram(nil) + require.Equal(t, histograms[3].ToFloat(nil), h3) + + require.Equal(t, chunkenc.ValFloatHistogram, buf.Next()) + _, h4 := buf.AtFloatHistogram(nil) + require.Equal(t, histograms[4].ToFloat(nil), h4) + + // Test for overwrite bug where the buffered histogram was reused + // between items in the buffer. + require.Equal(t, histograms[0], h0) + require.Equal(t, histograms[1], h1) + require.Equal(t, histograms[2], h2) + require.Equal(t, histograms[3].ToFloat(nil), h3) + require.Equal(t, histograms[4].ToFloat(nil), h4) +} + func BenchmarkBufferedSeriesIterator(b *testing.B) { // Simulate a 5 minute rate. 
it := NewBufferIterator(newFakeSeriesIterator(int64(b.N), 30), 5*60) diff --git a/storage/series.go b/storage/series.go index 70e3d0a19..a3dbec708 100644 --- a/storage/series.go +++ b/storage/series.go @@ -171,6 +171,34 @@ func (it *listSeriesIterator) Seek(t int64) chunkenc.ValueType { func (it *listSeriesIterator) Err() error { return nil } +type listSeriesIteratorWithCopy struct { + *listSeriesIterator +} + +func NewListSeriesIteratorWithCopy(samples Samples) chunkenc.Iterator { + return &listSeriesIteratorWithCopy{ + listSeriesIterator: &listSeriesIterator{samples: samples, idx: -1}, + } +} + +func (it *listSeriesIteratorWithCopy) AtHistogram(h *histogram.Histogram) (int64, *histogram.Histogram) { + t, ih := it.listSeriesIterator.AtHistogram(nil) + if h == nil || ih == nil { + return t, ih + } + ih.CopyTo(h) + return t, h +} + +func (it *listSeriesIteratorWithCopy) AtFloatHistogram(fh *histogram.FloatHistogram) (int64, *histogram.FloatHistogram) { + t, ih := it.listSeriesIterator.AtFloatHistogram(nil) + if fh == nil || ih == nil { + return t, ih + } + ih.CopyTo(fh) + return t, fh +} + type listChunkSeriesIterator struct { chks []chunks.Meta idx int From 44ebbb8458adb429c135ea31a29a1852b69d65a7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gy=C3=B6rgy=20Krajcsovits?= Date: Tue, 1 Oct 2024 17:19:13 +0200 Subject: [PATCH 62/74] Fix missing histogram copy in sampleRing MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The specialized version of sample add to the ring: func addH(s hSample, buf []hSample, r *sampleRing) []hSample func addFH(s fhSample, buf []fhSample, r *sampleRing) []fhSample already correctly copy histogram samples from the reused hReader, fhReader buffers, but the generic version does not. This means that the data is overwritten on the next read if the sample ring has seen histogram and float samples at the same time and switched to generic mode. 
The `genericAdd` function (which was commented anyway) is by now quite different from the specialized functions so that this commit deletes it. Signed-off-by: György Krajcsovits --- storage/buffer.go | 71 +++++++++++------------------------------- tsdb/chunks/samples.go | 12 +++++++ tsdb/head.go | 11 +++++++ 3 files changed, 41 insertions(+), 53 deletions(-) diff --git a/storage/buffer.go b/storage/buffer.go index 9f31fb53f..ad504ad5d 100644 --- a/storage/buffer.go +++ b/storage/buffer.go @@ -187,6 +187,10 @@ func (s fSample) Type() chunkenc.ValueType { return chunkenc.ValFloat } +func (s fSample) Copy() chunks.Sample { + return s +} + type hSample struct { t int64 h *histogram.Histogram @@ -212,6 +216,10 @@ func (s hSample) Type() chunkenc.ValueType { return chunkenc.ValHistogram } +func (s hSample) Copy() chunks.Sample { + return hSample{t: s.t, h: s.h.Copy()} +} + type fhSample struct { t int64 fh *histogram.FloatHistogram @@ -237,6 +245,10 @@ func (s fhSample) Type() chunkenc.ValueType { return chunkenc.ValFloatHistogram } +func (s fhSample) Copy() chunks.Sample { + return fhSample{t: s.t, fh: s.fh.Copy()} +} + type sampleRing struct { delta int64 @@ -535,55 +547,8 @@ func (r *sampleRing) addFH(s fhSample) { } } -// genericAdd is a generic implementation of adding a chunks.Sample -// implementation to a buffer of a sample ring. However, the Go compiler -// currently (go1.20) decides to not expand the code during compile time, but -// creates dynamic code to handle the different types. That has a significant -// overhead during runtime, noticeable in PromQL benchmarks. For example, the -// "RangeQuery/expr=rate(a_hundred[1d]),steps=.*" benchmarks show about 7% -// longer runtime, 9% higher allocation size, and 10% more allocations. -// Therefore, genericAdd has been manually implemented for all the types -// (addSample, addF, addH, addFH) below. 
-//
-// func genericAdd[T chunks.Sample](s T, buf []T, r *sampleRing) []T {
-// l := len(buf)
-// // Grow the ring buffer if it fits no more elements.
-// if l == 0 {
-// buf = make([]T, 16)
-// l = 16
-// }
-// if l == r.l {
-// newBuf := make([]T, 2*l)
-// copy(newBuf[l+r.f:], buf[r.f:])
-// copy(newBuf, buf[:r.f])
-//
-// buf = newBuf
-// r.i = r.f
-// r.f += l
-// l = 2 * l
-// } else {
-// r.i++
-// if r.i >= l {
-// r.i -= l
-// }
-// }
-//
-// buf[r.i] = s
-// r.l++
-//
-// // Free head of the buffer of samples that just fell out of the range.
-// tmin := s.T() - r.delta
-// for buf[r.f].T() < tmin {
-// r.f++
-// if r.f >= l {
-// r.f -= l
-// }
-// r.l--
-// }
-// return buf
-// }
-
-// addSample is a handcoded specialization of genericAdd (see above).
+// addSample adds a sample to a buffer of chunks.Sample, i.e. the general case
+// using an interface as the type.
 func addSample(s chunks.Sample, buf []chunks.Sample, r *sampleRing) []chunks.Sample {
 l := len(buf)
 // Grow the ring buffer if it fits no more elements.
@@ -607,7 +572,7 @@ func addSample(s chunks.Sample, buf []chunks.Sample, r *sampleRing) []chunks.Sam
 }
 }
 
- buf[r.i] = s
+ buf[r.i] = s.Copy()
 r.l++
 
 // Free head of the buffer of samples that just fell out of the range.
@@ -622,7 +587,7 @@ func addSample(s chunks.Sample, buf []chunks.Sample, r *sampleRing) []chunks.Sam
 return buf
 }
 
-// addF is a handcoded specialization of genericAdd (see above).
+// addF adds an fSample to a (specialized) fSample buffer.
 func addF(s fSample, buf []fSample, r *sampleRing) []fSample {
 l := len(buf)
 // Grow the ring buffer if it fits no more elements.
@@ -661,7 +626,7 @@ func addF(s fSample, buf []fSample, r *sampleRing) []fSample {
 return buf
 }
 
-// addH is a handcoded specialization of genericAdd (see above).
+// addH adds an hSample to a (specialized) hSample buffer.
 func addH(s hSample, buf []hSample, r *sampleRing) []hSample {
 l := len(buf)
 // Grow the ring buffer if it fits no more elements. 
@@ -705,7 +670,7 @@ func addH(s hSample, buf []hSample, r *sampleRing) []hSample { return buf } -// addFH is a handcoded specialization of genericAdd (see above). +// addFH adds an fhSample to a (specialized) fhSample buffer. func addFH(s fhSample, buf []fhSample, r *sampleRing) []fhSample { l := len(buf) // Grow the ring buffer if it fits no more elements. diff --git a/tsdb/chunks/samples.go b/tsdb/chunks/samples.go index 638660c70..a5b16094d 100644 --- a/tsdb/chunks/samples.go +++ b/tsdb/chunks/samples.go @@ -29,6 +29,7 @@ type Sample interface { H() *histogram.Histogram FH() *histogram.FloatHistogram Type() chunkenc.ValueType + Copy() Sample // Returns a deep copy. } type SampleSlice []Sample @@ -70,6 +71,17 @@ func (s sample) Type() chunkenc.ValueType { } } +func (s sample) Copy() Sample { + c := sample{t: s.t, f: s.f} + if s.h != nil { + c.h = s.h.Copy() + } + if s.fh != nil { + c.fh = s.fh.Copy() + } + return c +} + // GenerateSamples starting at start and counting up numSamples. func GenerateSamples(start, numSamples int) []Sample { return generateSamples(start, numSamples, func(i int) Sample { diff --git a/tsdb/head.go b/tsdb/head.go index af16fbf37..f469e5e34 100644 --- a/tsdb/head.go +++ b/tsdb/head.go @@ -2081,6 +2081,17 @@ func (s sample) Type() chunkenc.ValueType { } } +func (s sample) Copy() chunks.Sample { + c := sample{t: s.t, f: s.f} + if s.h != nil { + c.h = s.h.Copy() + } + if s.fh != nil { + c.fh = s.fh.Copy() + } + return c +} + // memSeries is the in-memory representation of a series. None of its methods // are goroutine safe and it is the caller's responsibility to lock it. 
type memSeries struct { From 06945b39331a471b2b6b14e0f38a31c6720e3146 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jorge=20Alberto=20D=C3=ADaz=20Orozco?= Date: Wed, 2 Oct 2024 13:37:10 +0200 Subject: [PATCH 63/74] Add a mutex and used ports list to the random port generator MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit to avoid port collisions Signed-off-by: Jorge Alberto Díaz Orozco --- util/testutil/port.go | 41 ++++++++++++++++++++++++++++++++++++++--- 1 file changed, 38 insertions(+), 3 deletions(-) diff --git a/util/testutil/port.go b/util/testutil/port.go index 1e449b123..7cf4cf1cc 100644 --- a/util/testutil/port.go +++ b/util/testutil/port.go @@ -15,21 +15,56 @@ package testutil import ( "net" + "sync" "testing" ) +var ( + mu sync.Mutex + usedPorts []int +) + // RandomUnprivilegedPort returns valid unprivileged random port number which can be used for testing. func RandomUnprivilegedPort(t *testing.T) int { t.Helper() + mu.Lock() + defer mu.Unlock() + port, err := getPort() + if err != nil { + t.Fatal(err) + } + + for portWasUsed(port) { + port, err = getPort() + if err != nil { + t.Fatal(err) + } + } + + usedPorts = append(usedPorts, port) + + return port +} + +func portWasUsed(port int) bool { + for _, usedPort := range usedPorts { + if port == usedPort { + return true + } + } + return false +} + +func getPort() (int, error) { listener, err := net.Listen("tcp", ":0") if err != nil { - t.Fatalf("Listening on random port: %v", err) + return 0, err } if err := listener.Close(); err != nil { - t.Fatalf("Closing listener: %v", err) + return 0, err } - return listener.Addr().(*net.TCPAddr).Port + return listener.Addr().(*net.TCPAddr).Port, nil } From b6158e8956110b24e8b39072dd6d9c9d0fd649bb Mon Sep 17 00:00:00 2001 From: Julien Date: Thu, 3 Oct 2024 10:26:05 +0200 Subject: [PATCH 64/74] Notify web UI when starting up and shutting down Signed-off-by: Julien --- cmd/prometheus/main.go | 3 +++ web/api/notifications.go | 2 ++ 
2 files changed, 5 insertions(+) diff --git a/cmd/prometheus/main.go b/cmd/prometheus/main.go index d8369770b..8ad1db637 100644 --- a/cmd/prometheus/main.go +++ b/cmd/prometheus/main.go @@ -503,6 +503,7 @@ func main() { notifs := api.NewNotifications(cfg.maxNotificationsSubscribers, prometheus.DefaultRegisterer) cfg.web.NotificationsSub = notifs.Sub cfg.web.NotificationsGetter = notifs.Get + notifs.AddNotification(api.StartingUp) if err := cfg.setFeatureListOptions(logger); err != nil { fmt.Fprintln(os.Stderr, fmt.Errorf("Error parsing feature list: %w", err)) @@ -989,6 +990,7 @@ func main() { func(err error) { close(cancel) webHandler.SetReady(web.Stopping) + notifs.AddNotification(api.ShuttingDown) }, ) } @@ -1174,6 +1176,7 @@ func main() { reloadReady.Close() webHandler.SetReady(web.Ready) + notifs.DeleteNotification(api.StartingUp) level.Info(logger).Log("msg", "Server is ready to receive web requests.") <-cancel return nil diff --git a/web/api/notifications.go b/web/api/notifications.go index 976f0b076..a838fbd98 100644 --- a/web/api/notifications.go +++ b/web/api/notifications.go @@ -22,6 +22,8 @@ import ( const ( ConfigurationUnsuccessful = "Configuration reload has failed." + StartingUp = "Prometheus is starting and replaying the write-ahead log (WAL)." + ShuttingDown = "Prometheus is shutting down and gracefully stopping all operations." ) // Notification represents an individual notification message. From 1f40859f9e599de5f96e50a55b0eed614b7583a5 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 4 Oct 2024 07:44:20 +0000 Subject: [PATCH 65/74] Bump google.golang.org/api from 0.195.0 to 0.199.0 Bumps [google.golang.org/api](https://github.com/googleapis/google-api-go-client) from 0.195.0 to 0.199.0. 
- [Release notes](https://github.com/googleapis/google-api-go-client/releases) - [Changelog](https://github.com/googleapis/google-api-go-client/blob/main/CHANGES.md) - [Commits](https://github.com/googleapis/google-api-go-client/compare/v0.195.0...v0.199.0) --- updated-dependencies: - dependency-name: google.golang.org/api dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- go.mod | 12 ++++++------ go.sum | 24 ++++++++++++------------ 2 files changed, 18 insertions(+), 18 deletions(-) diff --git a/go.mod b/go.mod index 4d0c98719..eb125a66f 100644 --- a/go.mod +++ b/go.mod @@ -81,9 +81,9 @@ require ( golang.org/x/text v0.18.0 golang.org/x/time v0.6.0 golang.org/x/tools v0.24.0 - google.golang.org/api v0.195.0 + google.golang.org/api v0.199.0 google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 - google.golang.org/grpc v1.66.2 + google.golang.org/grpc v1.67.0 google.golang.org/protobuf v1.34.2 gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.1 @@ -95,9 +95,9 @@ require ( ) require ( - cloud.google.com/go/auth v0.9.3 // indirect + cloud.google.com/go/auth v0.9.5 // indirect cloud.google.com/go/auth/oauth2adapt v0.2.4 // indirect - cloud.google.com/go/compute/metadata v0.5.0 // indirect + cloud.google.com/go/compute/metadata v0.5.2 // indirect github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect github.com/Microsoft/go-winio v0.6.1 // indirect @@ -106,7 +106,7 @@ require ( github.com/beorn7/perks v1.0.1 // indirect github.com/cenkalti/backoff/v4 v4.3.0 // indirect github.com/cilium/ebpf v0.11.0 // indirect - github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b // indirect + github.com/cncf/xds/go v0.0.0-20240723142845-024c85f92f20 // indirect github.com/containerd/cgroups/v3 v3.0.3 // indirect github.com/containerd/log v0.1.0 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect 
@@ -133,7 +133,7 @@ require ( github.com/go-resty/resty/v2 v2.13.1 // indirect github.com/godbus/dbus/v5 v5.0.4 // indirect github.com/golang-jwt/jwt/v5 v5.2.1 // indirect - github.com/golang/glog v1.2.1 // indirect + github.com/golang/glog v1.2.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.4 // indirect github.com/google/gnostic-models v0.6.8 // indirect diff --git a/go.sum b/go.sum index 73dafaa10..423973d8e 100644 --- a/go.sum +++ b/go.sum @@ -12,8 +12,8 @@ cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bP cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go/auth v0.9.3 h1:VOEUIAADkkLtyfr3BLa3R8Ed/j6w1jTBmARx+wb5w5U= -cloud.google.com/go/auth v0.9.3/go.mod h1:7z6VY+7h3KUdRov5F1i8NDP5ZzWKYmEPO842BgCsmTk= +cloud.google.com/go/auth v0.9.5 h1:4CTn43Eynw40aFVr3GpPqsQponx2jv0BQpjvajsbbzw= +cloud.google.com/go/auth v0.9.5/go.mod h1:Xo0n7n66eHyOWWCnitop6870Ilwo3PiZyodVkkH1xWM= cloud.google.com/go/auth/oauth2adapt v0.2.4 h1:0GWE/FUsXhf6C+jAkWgYm7X9tK8cuEIfy19DBn6B6bY= cloud.google.com/go/auth/oauth2adapt v0.2.4/go.mod h1:jC/jOpwFP6JBxhB3P5Rr0a9HLMC/Pe3eaL4NmdvqPtc= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= @@ -22,8 +22,8 @@ cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvf cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/compute/metadata v0.5.0 h1:Zr0eK8JbFv6+Wi4ilXAR8FJ3wyNdpxHKJNPos6LTZOY= -cloud.google.com/go/compute/metadata 
v0.5.0/go.mod h1:aHnloV2TPI38yx4s9+wAZhHykWvVCfu7hQbF+9CWoiY= +cloud.google.com/go/compute/metadata v0.5.2 h1:UxK4uu/Tn+I3p2dYWTfiX4wva7aYlKixAHn3fyqngqo= +cloud.google.com/go/compute/metadata v0.5.2/go.mod h1:C66sj2AluDcIqakBq/M8lw8/ybHgOZqin2obFxa/E5k= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= @@ -120,8 +120,8 @@ github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b h1:ga8SEFjZ60pxLcmhnThWgvH2wg8376yUJmPhEH4H3kw= -github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= +github.com/cncf/xds/go v0.0.0-20240723142845-024c85f92f20 h1:N+3sFI5GUjRKBi+i0TxYVST9h4Ie192jJWpHvthBBgg= +github.com/cncf/xds/go v0.0.0-20240723142845-024c85f92f20/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= github.com/containerd/cgroups/v3 v3.0.3 h1:S5ByHZ/h9PMe5IOQoN7E+nMc2UcLEM/V48DGDJ9kip0= @@ -251,8 +251,8 @@ github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69 github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk= github.com/golang-jwt/jwt/v5 v5.2.1/go.mod 
h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.2.1 h1:OptwRhECazUx5ix5TTWC3EZhsZEHWcYWY4FQHTIubm4= -github.com/golang/glog v1.2.1/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= +github.com/golang/glog v1.2.2 h1:1+mZ9upx1Dh6FmUTFR1naJ77miKiXgALjWOZ3NVFPmY= +github.com/golang/glog v1.2.2/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -1056,8 +1056,8 @@ google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/ google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.195.0 h1:Ude4N8FvTKnnQJHU48RFI40jOBgIrL8Zqr3/QeST6yU= -google.golang.org/api v0.195.0/go.mod h1:DOGRWuv3P8TU8Lnz7uQc4hyNqrBpMtD9ppW3wBJurgc= +google.golang.org/api v0.199.0 h1:aWUXClp+VFJmqE0JPvpZOK3LDQMyFKYIow4etYd9qxs= +google.golang.org/api v0.199.0/go.mod h1:ohG4qSztDJmZdjK/Ar6MhbAmb/Rpi4JHOqagsh90K28= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -1116,8 +1116,8 @@ google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3Iji google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= 
google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.66.2 h1:3QdXkuq3Bkh7w+ywLdLvM56cmGvQHUMZpiCzt6Rqaoo= -google.golang.org/grpc v1.66.2/go.mod h1:s3/l6xSSCURdVfAnL+TqCNMyTDAGN6+lZeVxnZR128Y= +google.golang.org/grpc v1.67.0 h1:IdH9y6PF5MPSdAntIcpjQ+tXO41pcQsfZV2RxtQgVcw= +google.golang.org/grpc v1.67.0/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= From 52cc937af0b784f4d32439c69ed336c02e423019 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 4 Oct 2024 07:46:29 +0000 Subject: [PATCH 66/74] Bump go.uber.org/automaxprocs from 1.5.3 to 1.6.0 Bumps [go.uber.org/automaxprocs](https://github.com/uber-go/automaxprocs) from 1.5.3 to 1.6.0. - [Release notes](https://github.com/uber-go/automaxprocs/releases) - [Changelog](https://github.com/uber-go/automaxprocs/blob/master/CHANGELOG.md) - [Commits](https://github.com/uber-go/automaxprocs/compare/v1.5.3...v1.6.0) --- updated-dependencies: - dependency-name: go.uber.org/automaxprocs dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 4d0c98719..cb8d8a7ff 100644 --- a/go.mod +++ b/go.mod @@ -72,7 +72,7 @@ require ( go.opentelemetry.io/otel/sdk v1.30.0 go.opentelemetry.io/otel/trace v1.30.0 go.uber.org/atomic v1.11.0 - go.uber.org/automaxprocs v1.5.3 + go.uber.org/automaxprocs v1.6.0 go.uber.org/goleak v1.3.0 go.uber.org/multierr v1.11.0 golang.org/x/oauth2 v0.23.0 diff --git a/go.sum b/go.sum index 73dafaa10..b5d30ca42 100644 --- a/go.sum +++ b/go.sum @@ -758,8 +758,8 @@ go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= -go.uber.org/automaxprocs v1.5.3 h1:kWazyxZUrS3Gs4qUpbwo5kEIMGe/DAvi5Z4tl2NW4j8= -go.uber.org/automaxprocs v1.5.3/go.mod h1:eRbA25aqJrxAbsLO0xy5jVwPt7FQnRgjW+efnwa1WM0= +go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs= +go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= From 21e0f83b68789f281a5c1639ccd5b30a486d7fc8 Mon Sep 17 00:00:00 2001 From: Julien Date: Fri, 4 Oct 2024 10:11:56 +0200 Subject: [PATCH 67/74] Move notifications in utils Signed-off-by: Julien --- cmd/prometheus/main.go | 14 +++++++------- {web/api => util/notifications}/notifications.go | 2 +- .../notifications}/notifications_test.go | 2 +- web/api/v1/api.go | 10 +++++----- web/web.go | 6 +++--- 5 files changed, 17 insertions(+), 17 deletions(-) rename {web/api => util/notifications}/notifications.go 
(99%) rename {web/api => util/notifications}/notifications_test.go (99%) diff --git a/cmd/prometheus/main.go b/cmd/prometheus/main.go index 8ad1db637..11d8caae6 100644 --- a/cmd/prometheus/main.go +++ b/cmd/prometheus/main.go @@ -76,9 +76,9 @@ import ( "github.com/prometheus/prometheus/tsdb/wlog" "github.com/prometheus/prometheus/util/documentcli" "github.com/prometheus/prometheus/util/logging" + "github.com/prometheus/prometheus/util/notifications" prom_runtime "github.com/prometheus/prometheus/util/runtime" "github.com/prometheus/prometheus/web" - "github.com/prometheus/prometheus/web/api" ) var ( @@ -500,10 +500,10 @@ func main() { logger := promlog.New(&cfg.promlogConfig) - notifs := api.NewNotifications(cfg.maxNotificationsSubscribers, prometheus.DefaultRegisterer) + notifs := notifications.NewNotifications(cfg.maxNotificationsSubscribers, prometheus.DefaultRegisterer) cfg.web.NotificationsSub = notifs.Sub cfg.web.NotificationsGetter = notifs.Get - notifs.AddNotification(api.StartingUp) + notifs.AddNotification(notifications.StartingUp) if err := cfg.setFeatureListOptions(logger); err != nil { fmt.Fprintln(os.Stderr, fmt.Errorf("Error parsing feature list: %w", err)) @@ -990,7 +990,7 @@ func main() { func(err error) { close(cancel) webHandler.SetReady(web.Stopping) - notifs.AddNotification(api.ShuttingDown) + notifs.AddNotification(notifications.ShuttingDown) }, ) } @@ -1091,10 +1091,10 @@ func main() { callback := func(success bool) { if success { - notifs.DeleteNotification(api.ConfigurationUnsuccessful) + notifs.DeleteNotification(notifications.ConfigurationUnsuccessful) return } - notifs.AddNotification(api.ConfigurationUnsuccessful) + notifs.AddNotification(notifications.ConfigurationUnsuccessful) } g.Add( @@ -1176,7 +1176,7 @@ func main() { reloadReady.Close() webHandler.SetReady(web.Ready) - notifs.DeleteNotification(api.StartingUp) + notifs.DeleteNotification(notifications.StartingUp) level.Info(logger).Log("msg", "Server is ready to receive web 
requests.") <-cancel return nil diff --git a/web/api/notifications.go b/util/notifications/notifications.go similarity index 99% rename from web/api/notifications.go rename to util/notifications/notifications.go index a838fbd98..4888a0b66 100644 --- a/web/api/notifications.go +++ b/util/notifications/notifications.go @@ -11,7 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package api +package notifications import ( "sync" diff --git a/web/api/notifications_test.go b/util/notifications/notifications_test.go similarity index 99% rename from web/api/notifications_test.go rename to util/notifications/notifications_test.go index 437ff1ec4..e487e9ce5 100644 --- a/web/api/notifications_test.go +++ b/util/notifications/notifications_test.go @@ -11,7 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package api +package notifications import ( "sync" diff --git a/web/api/v1/api.go b/web/api/v1/api.go index 46666af90..95ab7ea2a 100644 --- a/web/api/v1/api.go +++ b/web/api/v1/api.go @@ -54,8 +54,8 @@ import ( "github.com/prometheus/prometheus/tsdb/index" "github.com/prometheus/prometheus/util/annotations" "github.com/prometheus/prometheus/util/httputil" + "github.com/prometheus/prometheus/util/notifications" "github.com/prometheus/prometheus/util/stats" - "github.com/prometheus/prometheus/web/api" ) type status string @@ -214,8 +214,8 @@ type API struct { gatherer prometheus.Gatherer isAgent bool statsRenderer StatsRenderer - notificationsGetter func() []api.Notification - notificationsSub func() (<-chan api.Notification, func(), bool) + notificationsGetter func() []notifications.Notification + notificationsSub func() (<-chan notifications.Notification, func(), bool) remoteWriteHandler http.Handler remoteReadHandler http.Handler @@ -249,8 +249,8 @@ func NewAPI( corsOrigin *regexp.Regexp, runtimeInfo func() (RuntimeInfo, error), buildInfo 
*PrometheusVersion, - notificationsGetter func() []api.Notification, - notificationsSub func() (<-chan api.Notification, func(), bool), + notificationsGetter func() []notifications.Notification, + notificationsSub func() (<-chan notifications.Notification, func(), bool), gatherer prometheus.Gatherer, registerer prometheus.Registerer, statsRenderer StatsRenderer, diff --git a/web/web.go b/web/web.go index 724ca9105..5e1d3d230 100644 --- a/web/web.go +++ b/web/web.go @@ -59,7 +59,7 @@ import ( "github.com/prometheus/prometheus/template" "github.com/prometheus/prometheus/util/httputil" "github.com/prometheus/prometheus/util/netconnlimit" - "github.com/prometheus/prometheus/web/api" + "github.com/prometheus/prometheus/util/notifications" api_v1 "github.com/prometheus/prometheus/web/api/v1" "github.com/prometheus/prometheus/web/ui" ) @@ -267,8 +267,8 @@ type Options struct { RuleManager *rules.Manager Notifier *notifier.Manager Version *PrometheusVersion - NotificationsGetter func() []api.Notification - NotificationsSub func() (<-chan api.Notification, func(), bool) + NotificationsGetter func() []notifications.Notification + NotificationsSub func() (<-chan notifications.Notification, func(), bool) Flags map[string]string ListenAddresses []string From 9d275c23de3c1d2c1e8c40bfb2f21eb86394d026 Mon Sep 17 00:00:00 2001 From: Julien Date: Fri, 4 Oct 2024 11:17:59 +0200 Subject: [PATCH 68/74] cmd/prometheus: Fix flakiness of QueryLogTest Now we check that a rule execution has taken place. This also reduces the time to run the rules tests from 45s to 25s. 
Signed-off-by: Julien --- cmd/prometheus/query_log_test.go | 55 ++++++++++++++++++++++++++++++-- 1 file changed, 52 insertions(+), 3 deletions(-) diff --git a/cmd/prometheus/query_log_test.go b/cmd/prometheus/query_log_test.go index 62e317bf8..3b00230cd 100644 --- a/cmd/prometheus/query_log_test.go +++ b/cmd/prometheus/query_log_test.go @@ -125,12 +125,61 @@ func (p *queryLogTest) query(t *testing.T) { require.NoError(t, err) require.Equal(t, 200, r.StatusCode) case ruleOrigin: - time.Sleep(2 * time.Second) + // Poll the /api/v1/rules endpoint until a new rule evaluation is detected. + var lastEvalTime time.Time + for { + r, err := http.Get(fmt.Sprintf("http://%s:%d/api/v1/rules", p.host, p.port)) + require.NoError(t, err) + + rulesBody, err := io.ReadAll(r.Body) + require.NoError(t, err) + defer r.Body.Close() + + // Parse the rules response to find the last evaluation time. + newEvalTime := parseLastEvaluation(rulesBody) + if newEvalTime.After(lastEvalTime) { + if !lastEvalTime.IsZero() { + break + } + lastEvalTime = newEvalTime + } + + time.Sleep(100 * time.Millisecond) + } default: panic("can't query this origin") } } +// parseLastEvaluation extracts the last evaluation timestamp from the /api/v1/rules response. +func parseLastEvaluation(rulesBody []byte) time.Time { + var ruleResponse struct { + Status string `json:"status"` + Data struct { + Groups []struct { + Rules []struct { + LastEvaluation string `json:"lastEvaluation"` + } `json:"rules"` + } `json:"groups"` + } `json:"data"` + } + + err := json.Unmarshal(rulesBody, &ruleResponse) + if err != nil { + return time.Time{} + } + + for _, group := range ruleResponse.Data.Groups { + for _, rule := range group.Rules { + if evalTime, err := time.Parse(time.RFC3339Nano, rule.LastEvaluation); err == nil { + return evalTime + } + } + } + + return time.Time{} +} + // queryString returns the expected queryString of a this test. 
func (p *queryLogTest) queryString() string { switch p.origin { @@ -322,7 +371,7 @@ func (p *queryLogTest) run(t *testing.T) { if p.exactQueryCount() { require.Len(t, ql, qc) } else { - require.Greater(t, len(ql), qc, "no queries logged") + require.GreaterOrEqual(t, len(ql), qc, "no queries logged") } p.validateLastQuery(t, ql) qc = len(ql) @@ -353,7 +402,7 @@ func (p *queryLogTest) run(t *testing.T) { if p.exactQueryCount() { require.Len(t, ql, qc) } else { - require.Greater(t, len(ql), qc, "no queries logged") + require.GreaterOrEqual(t, len(ql), qc, "no queries logged") } p.validateLastQuery(t, ql) From 47aeca96630344464a139b0963dcc85ff463f84d Mon Sep 17 00:00:00 2001 From: Manik Rana Date: Fri, 4 Oct 2024 17:41:02 +0530 Subject: [PATCH 69/74] feat: naive fixes and optimzations for `CreatedTimestamp` function (#14965) * enhance: wip ct parse optimizations Signed-off-by: Manik Rana * feat: further work on optimization Signed-off-by: Manik Rana * feat: further improvements and remove unused code Signed-off-by: Manik Rana * feat: improve optimizations and fix some CT parse errors Signed-off-by: Manik Rana * fix: check for LsetHash along with name Signed-off-by: Manik Rana * chore: cleanup and documentation Signed-off-by: Manik Rana * enhance: improve comments and add cleaner functions Signed-off-by: Manik Rana * feat: improve comments and add cleaner functions Signed-off-by: Manik Rana * chore: rename to resetCTParseValues Signed-off-by: Manik Rana * fix: post-merge fixes Signed-off-by: Manik Rana * fix: add all possible reserved suffixes Signed-off-by: Manik Rana * test: separate CT values for each metric Signed-off-by: Manik Rana --------- Signed-off-by: Manik Rana Signed-off-by: Manik Rana --- model/textparse/openmetricsparse.go | 106 +++++++--- model/textparse/openmetricsparse_test.go | 259 ++++++++++------------- 2 files changed, 184 insertions(+), 181 deletions(-) diff --git a/model/textparse/openmetricsparse.go b/model/textparse/openmetricsparse.go index 
8ec1b62ff..0e82dc9f5 100644 --- a/model/textparse/openmetricsparse.go +++ b/model/textparse/openmetricsparse.go @@ -95,6 +95,12 @@ type OpenMetricsParser struct { exemplarTs int64 hasExemplarTs bool + // Created timestamp parsing state. + ct int64 + ctHashSet uint64 + // visitedName is the metric name of the last visited metric when peeking ahead + // for _created series during the execution of the CreatedTimestamp method. + visitedName string skipCTSeries bool } @@ -254,6 +260,9 @@ func (p *OpenMetricsParser) Exemplar(e *exemplar.Exemplar) bool { func (p *OpenMetricsParser) CreatedTimestamp() *int64 { if !typeRequiresCT(p.mtype) { // Not a CT supported metric type, fast path. + p.ct = 0 + p.visitedName = "" + p.ctHashSet = 0 return nil } @@ -264,27 +273,44 @@ func (p *OpenMetricsParser) CreatedTimestamp() *int64 { ) p.Metric(&currLset) currFamilyLsetHash, buf := currLset.HashWithoutLabels(buf, labels.MetricName, "le", "quantile") - // Search for the _created line for the currFamilyLsetHash using ephemeral parser until - // we see EOF or new metric family. We have to do it as we don't know where (and if) - // that CT line is. - // TODO(bwplotka): Make sure OM 1.1/2.0 pass CT via metadata or exemplar-like to avoid this. - peek := deepCopy(p) + currName := currLset.Get(model.MetricNameLabel) + currName = findBaseMetricName(currName) + + // make sure we're on a new metric before returning + if currName == p.visitedName && currFamilyLsetHash == p.ctHashSet && p.visitedName != "" && p.ctHashSet > 0 && p.ct > 0 { + // CT is already known, fast path. + return &p.ct + } + + // Create a new lexer to reset the parser once this function is done executing. + resetLexer := &openMetricsLexer{ + b: p.l.b, + i: p.l.i, + start: p.l.start, + err: p.l.err, + state: p.l.state, + } + + p.skipCTSeries = false + for { - eType, err := peek.Next() + eType, err := p.Next() if err != nil { - // This means peek will give error too later on, so def no CT line found. 
+ // This means p.Next() will give error too later on, so def no CT line found. // This might result in partial scrape with wrong/missing CT, but only // spec improvement would help. - // TODO(bwplotka): Make sure OM 1.1/2.0 pass CT via metadata or exemplar-like to avoid this. + // TODO: Make sure OM 1.1/2.0 pass CT via metadata or exemplar-like to avoid this. + p.resetCTParseValues(resetLexer) return nil } if eType != EntrySeries { // Assume we hit different family, no CT line found. + p.resetCTParseValues(resetLexer) return nil } var peekedLset labels.Labels - peek.Metric(&peekedLset) + p.Metric(&peekedLset) peekedName := peekedLset.Get(model.MetricNameLabel) if !strings.HasSuffix(peekedName, "_created") { // Not a CT line, search more. @@ -294,17 +320,52 @@ func (p *OpenMetricsParser) CreatedTimestamp() *int64 { // We got a CT line here, but let's search if CT line is actually for our series, edge case. peekWithoutNameLsetHash, _ = peekedLset.HashWithoutLabels(buf, labels.MetricName, "le", "quantile") if peekWithoutNameLsetHash != currFamilyLsetHash { - // CT line for a different series, for our series no CT. + // Found CT line for a different series, for our series no CT. + p.resetCTParseValues(resetLexer) return nil } // All timestamps in OpenMetrics are Unix Epoch in seconds. Convert to milliseconds. // https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#timestamps - ct := int64(peek.val * 1000.0) + ct := int64(p.val * 1000.0) + p.setCTParseValues(ct, currFamilyLsetHash, currName, true, resetLexer) return &ct } } +// setCTParseValues sets the parser to the state after CreatedTimestamp method was called and CT was found. +// This is useful to prevent re-parsing the same series again and early return the CT value. 
+func (p *OpenMetricsParser) setCTParseValues(ct int64, ctHashSet uint64, visitedName string, skipCTSeries bool, resetLexer *openMetricsLexer) { + p.ct = ct + p.l = resetLexer + p.ctHashSet = ctHashSet + p.visitedName = visitedName + p.skipCTSeries = skipCTSeries +} + +// resetCtParseValues resets the parser to the state before CreatedTimestamp method was called. +func (p *OpenMetricsParser) resetCTParseValues(resetLexer *openMetricsLexer) { + p.l = resetLexer + p.ct = 0 + p.ctHashSet = 0 + p.visitedName = "" + p.skipCTSeries = true +} + +// findBaseMetricName returns the metric name without reserved suffixes such as "_created", +// "_sum", etc. based on the OpenMetrics specification found at +// https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md. +// If no suffix is found, the original name is returned. +func findBaseMetricName(name string) string { + suffixes := []string{"_created", "_count", "_sum", "_bucket", "_total", "_gcount", "_gsum", "_info"} + for _, suffix := range suffixes { + if strings.HasSuffix(name, suffix) { + return strings.TrimSuffix(name, suffix) + } + } + return name +} + // typeRequiresCT returns true if the metric type requires a _created timestamp. func typeRequiresCT(t model.MetricType) bool { switch t { @@ -315,29 +376,6 @@ func typeRequiresCT(t model.MetricType) bool { } } -// deepCopy creates a copy of a parser without re-using the slices' original memory addresses. -func deepCopy(p *OpenMetricsParser) OpenMetricsParser { - newB := make([]byte, len(p.l.b)) - copy(newB, p.l.b) - - newLexer := &openMetricsLexer{ - b: newB, - i: p.l.i, - start: p.l.start, - err: p.l.err, - state: p.l.state, - } - - newParser := OpenMetricsParser{ - l: newLexer, - builder: p.builder, - mtype: p.mtype, - val: p.val, - skipCTSeries: false, - } - return newParser -} - // nextToken returns the next token from the openMetricsLexer. 
func (p *OpenMetricsParser) nextToken() token { tok := p.l.Lex() diff --git a/model/textparse/openmetricsparse_test.go b/model/textparse/openmetricsparse_test.go index 93033380b..bbb7c0730 100644 --- a/model/textparse/openmetricsparse_test.go +++ b/model/textparse/openmetricsparse_test.go @@ -79,17 +79,38 @@ bar_count 17.0 bar_sum 324789.3 bar{quantile="0.95"} 123.7 bar{quantile="0.99"} 150.0 -bar_created 1520872607.123 +bar_created 1520872608.124 # HELP baz Histogram with the same objective as above's summary # TYPE baz histogram baz_bucket{le="0.0"} 0 baz_bucket{le="+Inf"} 17 baz_count 17 baz_sum 324789.3 -baz_created 1520872607.123 +baz_created 1520872609.125 # HELP fizz_created Gauge which shouldn't be parsed as CT # TYPE fizz_created gauge -fizz_created 17.0` +fizz_created 17.0 +# HELP something Histogram with _created between buckets and summary +# TYPE something histogram +something_count 18 +something_sum 324789.4 +something_created 1520430001 +something_bucket{le="0.0"} 1 +something_bucket{le="+Inf"} 18 +# HELP yum Summary with _created between sum and quantiles +# TYPE yum summary +yum_count 20 +yum_sum 324789.5 +yum_created 1520430003 +yum{quantile="0.95"} 123.7 +yum{quantile="0.99"} 150.0 +# HELP foobar Summary with _created as the first line +# TYPE foobar summary +foobar_count 21 +foobar_created 1520430004 +foobar_sum 324789.6 +foobar{quantile="0.95"} 123.8 +foobar{quantile="0.99"} 150.1` input += "\n# HELP metric foo\x00bar" input += "\nnull_byte_metric{a=\"abc\x00\"} 1" @@ -269,22 +290,22 @@ fizz_created 17.0` m: "bar_count", v: 17.0, lset: labels.FromStrings("__name__", "bar_count"), - ct: int64p(1520872607123), + ct: int64p(1520872608124), }, { m: "bar_sum", v: 324789.3, lset: labels.FromStrings("__name__", "bar_sum"), - ct: int64p(1520872607123), + ct: int64p(1520872608124), }, { m: `bar{quantile="0.95"}`, v: 123.7, lset: labels.FromStrings("__name__", "bar", "quantile", "0.95"), - ct: int64p(1520872607123), + ct: int64p(1520872608124), }, { m: 
`bar{quantile="0.99"}`, v: 150.0, lset: labels.FromStrings("__name__", "bar", "quantile", "0.99"), - ct: int64p(1520872607123), + ct: int64p(1520872608124), }, { m: "baz", help: "Histogram with the same objective as above's summary", @@ -295,22 +316,22 @@ fizz_created 17.0` m: `baz_bucket{le="0.0"}`, v: 0, lset: labels.FromStrings("__name__", "baz_bucket", "le", "0.0"), - ct: int64p(1520872607123), + ct: int64p(1520872609125), }, { m: `baz_bucket{le="+Inf"}`, v: 17, lset: labels.FromStrings("__name__", "baz_bucket", "le", "+Inf"), - ct: int64p(1520872607123), + ct: int64p(1520872609125), }, { m: `baz_count`, v: 17, lset: labels.FromStrings("__name__", "baz_count"), - ct: int64p(1520872607123), + ct: int64p(1520872609125), }, { m: `baz_sum`, v: 324789.3, lset: labels.FromStrings("__name__", "baz_sum"), - ct: int64p(1520872607123), + ct: int64p(1520872609125), }, { m: "fizz_created", help: "Gauge which shouldn't be parsed as CT", @@ -321,6 +342,84 @@ fizz_created 17.0` m: `fizz_created`, v: 17, lset: labels.FromStrings("__name__", "fizz_created"), + }, { + m: "something", + help: "Histogram with _created between buckets and summary", + }, { + m: "something", + typ: model.MetricTypeHistogram, + }, { + m: `something_count`, + v: 18, + lset: labels.FromStrings("__name__", "something_count"), + ct: int64p(1520430001000), + }, { + m: `something_sum`, + v: 324789.4, + lset: labels.FromStrings("__name__", "something_sum"), + ct: int64p(1520430001000), + }, { + m: `something_bucket{le="0.0"}`, + v: 1, + lset: labels.FromStrings("__name__", "something_bucket", "le", "0.0"), + ct: int64p(1520430001000), + }, { + m: `something_bucket{le="+Inf"}`, + v: 18, + lset: labels.FromStrings("__name__", "something_bucket", "le", "+Inf"), + ct: int64p(1520430001000), + }, { + m: "yum", + help: "Summary with _created between sum and quantiles", + }, { + m: "yum", + typ: model.MetricTypeSummary, + }, { + m: `yum_count`, + v: 20, + lset: labels.FromStrings("__name__", "yum_count"), + ct: 
int64p(1520430003000), + }, { + m: `yum_sum`, + v: 324789.5, + lset: labels.FromStrings("__name__", "yum_sum"), + ct: int64p(1520430003000), + }, { + m: `yum{quantile="0.95"}`, + v: 123.7, + lset: labels.FromStrings("__name__", "yum", "quantile", "0.95"), + ct: int64p(1520430003000), + }, { + m: `yum{quantile="0.99"}`, + v: 150.0, + lset: labels.FromStrings("__name__", "yum", "quantile", "0.99"), + ct: int64p(1520430003000), + }, { + m: "foobar", + help: "Summary with _created as the first line", + }, { + m: "foobar", + typ: model.MetricTypeSummary, + }, { + m: `foobar_count`, + v: 21, + lset: labels.FromStrings("__name__", "foobar_count"), + ct: int64p(1520430004000), + }, { + m: `foobar_sum`, + v: 324789.6, + lset: labels.FromStrings("__name__", "foobar_sum"), + ct: int64p(1520430004000), + }, { + m: `foobar{quantile="0.95"}`, + v: 123.8, + lset: labels.FromStrings("__name__", "foobar", "quantile", "0.95"), + ct: int64p(1520430004000), + }, { + m: `foobar{quantile="0.99"}`, + v: 150.1, + lset: labels.FromStrings("__name__", "foobar", "quantile", "0.99"), + ct: int64p(1520430004000), }, { m: "metric", help: "foo\x00bar", @@ -784,34 +883,13 @@ func TestOMNullByteHandling(t *testing.T) { // these tests show them. // TODO(maniktherana): Make sure OM 1.1/2.0 pass CT via metadata or exemplar-like to avoid this. 
func TestCTParseFailures(t *testing.T) { - input := `# HELP something Histogram with _created between buckets and summary -# TYPE something histogram -something_count 17 -something_sum 324789.3 -something_created 1520872607.123 -something_bucket{le="0.0"} 0 -something_bucket{le="+Inf"} 17 -# HELP thing Histogram with _created as first line + input := `# HELP thing Histogram with _created as first line # TYPE thing histogram thing_created 1520872607.123 thing_count 17 thing_sum 324789.3 thing_bucket{le="0.0"} 0 -thing_bucket{le="+Inf"} 17 -# HELP yum Summary with _created between sum and quantiles -# TYPE yum summary -yum_count 17.0 -yum_sum 324789.3 -yum_created 1520872607.123 -yum{quantile="0.95"} 123.7 -yum{quantile="0.99"} 150.0 -# HELP foobar Summary with _created as the first line -# TYPE foobar summary -foobar_created 1520872607.123 -foobar_count 17.0 -foobar_sum 324789.3 -foobar{quantile="0.95"} 123.7 -foobar{quantile="0.99"} 150.0` +thing_bucket{le="+Inf"} 17` input += "\n# EOF\n" @@ -827,30 +905,6 @@ foobar{quantile="0.99"} 150.0` exp := []expectCT{ { - m: "something", - help: "Histogram with _created between buckets and summary", - isErr: false, - }, { - m: "something", - typ: model.MetricTypeHistogram, - isErr: false, - }, { - m: `something_count`, - ct: int64p(1520872607123), - isErr: false, - }, { - m: `something_sum`, - ct: int64p(1520872607123), - isErr: false, - }, { - m: `something_bucket{le="0.0"}`, - ct: int64p(1520872607123), - isErr: true, - }, { - m: `something_bucket{le="+Inf"}`, - ct: int64p(1520872607123), - isErr: true, - }, { m: "thing", help: "Histogram with _created as first line", isErr: false, @@ -874,54 +928,6 @@ foobar{quantile="0.99"} 150.0` m: `thing_bucket{le="+Inf"}`, ct: int64p(1520872607123), isErr: true, - }, { - m: "yum", - help: "Summary with _created between summary and quantiles", - isErr: false, - }, { - m: "yum", - typ: model.MetricTypeSummary, - isErr: false, - }, { - m: "yum_count", - ct: int64p(1520872607123), - 
isErr: false, - }, { - m: "yum_sum", - ct: int64p(1520872607123), - isErr: false, - }, { - m: `yum{quantile="0.95"}`, - ct: int64p(1520872607123), - isErr: true, - }, { - m: `yum{quantile="0.99"}`, - ct: int64p(1520872607123), - isErr: true, - }, { - m: "foobar", - help: "Summary with _created as the first line", - isErr: false, - }, { - m: "foobar", - typ: model.MetricTypeSummary, - isErr: false, - }, { - m: "foobar_count", - ct: int64p(1520430004), - isErr: true, - }, { - m: "foobar_sum", - ct: int64p(1520430004), - isErr: true, - }, { - m: `foobar{quantile="0.95"}`, - ct: int64p(1520430004), - isErr: true, - }, { - m: `foobar{quantile="0.99"}`, - ct: int64p(1520430004), - isErr: true, }, } @@ -953,47 +959,6 @@ foobar{quantile="0.99"} 150.0` } } -func TestDeepCopy(t *testing.T) { - input := []byte(`# HELP go_goroutines A gauge goroutines. -# TYPE go_goroutines gauge -go_goroutines 33 123.123 -# TYPE go_gc_duration_seconds summary -go_gc_duration_seconds -go_gc_duration_seconds_created`) - - st := labels.NewSymbolTable() - parser := NewOpenMetricsParser(input, st, WithOMParserCTSeriesSkipped()).(*OpenMetricsParser) - - // Modify the original parser state - _, err := parser.Next() - require.NoError(t, err) - require.Equal(t, "go_goroutines", string(parser.l.b[parser.offsets[0]:parser.offsets[1]])) - require.True(t, parser.skipCTSeries) - - // Create a deep copy of the parser - copyParser := deepCopy(parser) - etype, err := copyParser.Next() - require.NoError(t, err) - require.Equal(t, EntryType, etype) - require.True(t, parser.skipCTSeries) - require.False(t, copyParser.skipCTSeries) - - // Modify the original parser further - parser.Next() - parser.Next() - parser.Next() - require.Equal(t, "go_gc_duration_seconds", string(parser.l.b[parser.offsets[0]:parser.offsets[1]])) - require.Equal(t, "summary", string(parser.mtype)) - require.False(t, copyParser.skipCTSeries) - require.True(t, parser.skipCTSeries) - - // Ensure the copy remains unchanged - copyParser.Next() 
- copyParser.Next() - require.Equal(t, "go_gc_duration_seconds", string(copyParser.l.b[copyParser.offsets[0]:copyParser.offsets[1]])) - require.False(t, copyParser.skipCTSeries) -} - func BenchmarkOMParseCreatedTimestamp(b *testing.B) { for parserName, parser := range map[string]func([]byte, *labels.SymbolTable) Parser{ "openmetrics": func(b []byte, st *labels.SymbolTable) Parser { From 3d2194f56179300c3aa6653d1d72b225f85f7e9f Mon Sep 17 00:00:00 2001 From: Julius Volz Date: Fri, 4 Oct 2024 13:54:03 +0200 Subject: [PATCH 70/74] Style cleanups, mostly for web notifications and startup alert Some of the changes are a bit unreadable because the previous files were not saved with the project's linter / auto-formatter settings applied. But it's basically: * For icons that are not Mantine-native components, use the rem() function for computing their size, so they scale correctly with the root font size. See https://mantine.dev/styles/rem/. * Try a different icon for the notifications tray, since the bell icon was already used for Prometheus alerts. Other candidates from https://tabler.io/icons would be IconExclamationCircle or IconDeviceDesktopExclamation or IconMessageCircleExclamation. * The server startup alert looked a bit cramped, introduced a Stack to add spacing between the text and the progress bar. * Added a bit of spacing between notification text and date. Things looked cramped. To make things look ok with that, I also top-aligned the notification text and icon. 
Signed-off-by: Julius Volz --- .../src/components/NotificationsIcon.tsx | 136 ++++++++++++------ .../src/components/ReadinessWrapper.tsx | 34 +++-- web/ui/mantine-ui/src/pages/RulesPage.tsx | 9 +- .../query/MetricsExplorer/LabelsExplorer.tsx | 5 +- 4 files changed, 121 insertions(+), 63 deletions(-) diff --git a/web/ui/mantine-ui/src/components/NotificationsIcon.tsx b/web/ui/mantine-ui/src/components/NotificationsIcon.tsx index 5ab28b037..6d5afa190 100644 --- a/web/ui/mantine-ui/src/components/NotificationsIcon.tsx +++ b/web/ui/mantine-ui/src/components/NotificationsIcon.tsx @@ -1,61 +1,105 @@ -import { ActionIcon, Indicator, Popover, Card, Text, Stack, ScrollArea, Group } from "@mantine/core"; -import { IconBell, IconAlertTriangle, IconNetworkOff } from "@tabler/icons-react"; -import { useNotifications } from '../state/useNotifications'; +import { + ActionIcon, + Indicator, + Popover, + Card, + Text, + Stack, + ScrollArea, + Group, + rem, +} from "@mantine/core"; +import { + IconAlertTriangle, + IconNetworkOff, + IconMessageExclamation, +} from "@tabler/icons-react"; +import { useNotifications } from "../state/useNotifications"; import { actionIconStyle } from "../styles"; -import { useSettings } from '../state/settingsSlice'; +import { useSettings } from "../state/settingsSlice"; import { formatTimestamp } from "../lib/formatTime"; const NotificationsIcon = () => { const { notifications, isConnectionError } = useNotifications(); const { useLocalTime } = useSettings(); - return ( - (notifications.length === 0 && !isConnectionError) ? null : ( - - - - - - - + return notifications.length === 0 && !isConnectionError ? null : ( + + + + + + + - - - Notifications - - { isConnectionError ? ( - - - - - Real-time notifications interrupted. - Please refresh the page or check your connection. - - - - ) : notifications.length === 0 ? ( - No notifications - ) : (notifications.map((notification, index) => ( + + + + Notifications + + + {isConnectionError ? 
( + + + + + + Real-time notifications interrupted. + + + Please refresh the page or check your connection. + + + + + ) : notifications.length === 0 ? ( + + No notifications + + ) : ( + notifications.map((notification, index) => ( - - - - {notification.text} - {formatTimestamp(new Date(notification.date).valueOf() / 1000, useLocalTime)} + + + + + {notification.text} + + + {formatTimestamp( + new Date(notification.date).valueOf() / 1000, + useLocalTime + )} + - )))} - - - - - - ) + )) + )} + + + + + ); }; diff --git a/web/ui/mantine-ui/src/components/ReadinessWrapper.tsx b/web/ui/mantine-ui/src/components/ReadinessWrapper.tsx index dbfcba555..2e471de5e 100644 --- a/web/ui/mantine-ui/src/components/ReadinessWrapper.tsx +++ b/web/ui/mantine-ui/src/components/ReadinessWrapper.tsx @@ -4,7 +4,7 @@ import { useAppDispatch } from "../state/hooks"; import { updateSettings, useSettings } from "../state/settingsSlice"; import { useSuspenseAPIQuery } from "../api/api"; import { WALReplayStatus } from "../api/responseTypes/walreplay"; -import { Progress, Alert } from "@mantine/core"; +import { Progress, Alert, Stack } from "@mantine/core"; import { useSuspenseQuery } from "@tanstack/react-query"; const STATUS_STARTING = "is starting up..."; @@ -57,14 +57,12 @@ const ReadinessLoader: FC = () => { // Only call WAL replay status API if the service is starting up. const shouldQueryWALReplay = statusMessage === STATUS_STARTING; - const { - data: walData, - isSuccess: walSuccess, - } = useSuspenseAPIQuery({ - path: "/status/walreplay", - key: ["walreplay", queryKey], - enabled: shouldQueryWALReplay, // Only enabled when service is starting up. - }); + const { data: walData, isSuccess: walSuccess } = + useSuspenseAPIQuery({ + path: "/status/walreplay", + key: ["walreplay", queryKey], + enabled: shouldQueryWALReplay, // Only enabled when service is starting up. 
+ }); useEffect(() => { if (ready) { @@ -80,14 +78,18 @@ const ReadinessLoader: FC = () => { return ( } + title={ + "Prometheus " + + ((agentMode && "Agent ") || "") + + (statusMessage || STATUS_LOADING) + } + icon={} maw={500} mx="auto" mt="lg" > {shouldQueryWALReplay && walSuccess && walData && ( - <> + Replaying WAL ({walData.data.current}/{walData.data.max}) @@ -95,9 +97,13 @@ const ReadinessLoader: FC = () => { size="xl" animated color="yellow" - value={((walData.data.current - walData.data.min + 1) / (walData.data.max - walData.data.min + 1)) * 100} + value={ + ((walData.data.current - walData.data.min + 1) / + (walData.data.max - walData.data.min + 1)) * + 100 + } /> - + )} ); diff --git a/web/ui/mantine-ui/src/pages/RulesPage.tsx b/web/ui/mantine-ui/src/pages/RulesPage.tsx index ce0097776..a4ed44e7c 100644 --- a/web/ui/mantine-ui/src/pages/RulesPage.tsx +++ b/web/ui/mantine-ui/src/pages/RulesPage.tsx @@ -4,6 +4,7 @@ import { Badge, Card, Group, + rem, Stack, Text, Tooltip, @@ -135,11 +136,15 @@ export default function RulesPage() { {r.type === "alerting" ? 
( - + ) : ( - + )} {r.name} diff --git a/web/ui/mantine-ui/src/pages/query/MetricsExplorer/LabelsExplorer.tsx b/web/ui/mantine-ui/src/pages/query/MetricsExplorer/LabelsExplorer.tsx index 782fb5cf4..d18c017b1 100644 --- a/web/ui/mantine-ui/src/pages/query/MetricsExplorer/LabelsExplorer.tsx +++ b/web/ui/mantine-ui/src/pages/query/MetricsExplorer/LabelsExplorer.tsx @@ -21,6 +21,7 @@ import { Skeleton, Stack, Table, + rem, } from "@mantine/core"; import { escapeString } from "../../../lib/escapeString"; import serializeNode from "../../../promql/serialize"; @@ -326,7 +327,9 @@ const LabelsExplorer: FC = ({ title="Cancel" style={{ flexShrink: 0 }} > - + ) : ( From 6c3d11629b3c46e17267e3da652bd51e6090d316 Mon Sep 17 00:00:00 2001 From: Ben Ye Date: Sat, 5 Oct 2024 17:05:42 -0700 Subject: [PATCH 71/74] add missing flag storage.tsdb.allow-overlapping-compaction Signed-off-by: Ben Ye --- cmd/prometheus/main.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/cmd/prometheus/main.go b/cmd/prometheus/main.go index 11d8caae6..f670bc8b8 100644 --- a/cmd/prometheus/main.go +++ b/cmd/prometheus/main.go @@ -384,6 +384,9 @@ func main() { serverOnlyFlag(a, "storage.tsdb.no-lockfile", "Do not create lockfile in data directory."). Default("false").BoolVar(&cfg.tsdb.NoLockfile) + serverOnlyFlag(a, "storage.tsdb.allow-overlapping-compaction", "Allow compaction of overlapping blocks. If set to false, TSDB stops vertical compaction and leaves overlapping blocks there. The use case is to let another component handle the compaction of overlapping blocks."). + Default("true").Hidden().BoolVar(&cfg.tsdb.EnableOverlappingCompaction) + serverOnlyFlag(a, "storage.tsdb.wal-compression", "Compress the tsdb WAL."). 
Hidden().Default("true").BoolVar(&cfg.tsdb.WALCompression) From ab64966e9d21ce3a3e42415da3a4227f8220b15c Mon Sep 17 00:00:00 2001 From: Matthieu MOREL Date: Sun, 6 Oct 2024 18:35:29 +0200 Subject: [PATCH 72/74] fix: use "ErrorContains" or "EqualError" instead of "Contains(t, err.Error()" and "Equal(t, err.Error()" (#15094) * fix: use "ErrorContains" or "EqualError" instead of "Contains(t, err.Error()" and "Equal(t, err.Error()" --------- Signed-off-by: Matthieu MOREL Signed-off-by: Arve Knudsen Co-authored-by: Arve Knudsen --- .golangci.yml | 12 +----------- cmd/promtool/main_test.go | 8 ++++---- config/config_test.go | 3 +-- discovery/consul/consul_test.go | 2 +- discovery/hetzner/robot_test.go | 3 +-- discovery/openstack/hypervisor_test.go | 3 +-- discovery/openstack/instance_test.go | 3 +-- discovery/triton/triton_test.go | 7 ++----- discovery/xds/client_test.go | 6 ++---- discovery/xds/kuma_test.go | 3 +-- model/rulefmt/rulefmt_test.go | 11 ++++------- model/textparse/interface_test.go | 3 +-- model/textparse/openmetricsparse_test.go | 6 +++--- model/textparse/promparse_test.go | 6 ++---- promql/fuzz_test.go | 2 +- promql/parser/parse_test.go | 5 ++--- scrape/scrape_test.go | 4 ++-- storage/fanout_test.go | 18 ++++++------------ storage/remote/chunked_test.go | 4 ++-- storage/remote/codec_test.go | 5 ++--- tsdb/block_test.go | 4 ++-- tsdb/compact_test.go | 3 +-- tsdb/head_test.go | 3 +-- web/api/v1/api_test.go | 2 +- 24 files changed, 45 insertions(+), 81 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index 303cd33d8..d476be743 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -153,14 +153,4 @@ linters-settings: disable: - float-compare - go-require - enable: - - bool-compare - - compares - - empty - - error-is-as - - error-nil - - expected-actual - - len - - require-error - - suite-dont-use-pkg - - suite-extra-assert-call + enable-all: true diff --git a/cmd/promtool/main_test.go b/cmd/promtool/main_test.go index 9d891c32f..698e6641d 100644 --- 
a/cmd/promtool/main_test.go +++ b/cmd/promtool/main_test.go @@ -146,7 +146,7 @@ func TestCheckSDFile(t *testing.T) { t.Run(test.name, func(t *testing.T) { _, err := checkSDFile(test.file) if test.err != "" { - require.Equalf(t, test.err, err.Error(), "Expected error %q, got %q", test.err, err.Error()) + require.EqualErrorf(t, err, test.err, "Expected error %q, got %q", test.err, err.Error()) return } require.NoError(t, err) @@ -228,7 +228,7 @@ func TestCheckTargetConfig(t *testing.T) { t.Run(test.name, func(t *testing.T) { _, err := checkConfig(false, "testdata/"+test.file, false) if test.err != "" { - require.Equalf(t, test.err, err.Error(), "Expected error %q, got %q", test.err, err.Error()) + require.EqualErrorf(t, err, test.err, "Expected error %q, got %q", test.err, err.Error()) return } require.NoError(t, err) @@ -315,7 +315,7 @@ func TestCheckConfigSyntax(t *testing.T) { expectedErrMsg = test.errWindows } if expectedErrMsg != "" { - require.Equalf(t, expectedErrMsg, err.Error(), "Expected error %q, got %q", test.err, err.Error()) + require.EqualErrorf(t, err, expectedErrMsg, "Expected error %q, got %q", test.err, err.Error()) return } require.NoError(t, err) @@ -345,7 +345,7 @@ func TestAuthorizationConfig(t *testing.T) { t.Run(test.name, func(t *testing.T) { _, err := checkConfig(false, "testdata/"+test.file, false) if test.err != "" { - require.Contains(t, err.Error(), test.err, "Expected error to contain %q, got %q", test.err, err.Error()) + require.ErrorContains(t, err, test.err, "Expected error to contain %q, got %q", test.err, err.Error()) return } require.NoError(t, err) diff --git a/config/config_test.go b/config/config_test.go index 66377f687..47241e621 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -2095,8 +2095,7 @@ func TestBadConfigs(t *testing.T) { }() for _, ee := range expectedErrors { _, err := LoadFile("testdata/"+ee.filename, false, false, log.NewNopLogger()) - require.Error(t, err, "%s", ee.filename) - 
require.Contains(t, err.Error(), ee.errMsg, + require.ErrorContains(t, err, ee.errMsg, "Expected error for %s to contain %q but got: %s", ee.filename, ee.errMsg, err) } } diff --git a/discovery/consul/consul_test.go b/discovery/consul/consul_test.go index e3bc7938f..e288a5b2a 100644 --- a/discovery/consul/consul_test.go +++ b/discovery/consul/consul_test.go @@ -407,7 +407,7 @@ func TestGetDatacenterShouldReturnError(t *testing.T) { err = d.getDatacenter() // An error should be returned. - require.Equal(t, tc.errMessage, err.Error()) + require.EqualError(t, err, tc.errMessage) // Should still be empty. require.Equal(t, "", d.clientDatacenter) } diff --git a/discovery/hetzner/robot_test.go b/discovery/hetzner/robot_test.go index abee5fea9..814bccd51 100644 --- a/discovery/hetzner/robot_test.go +++ b/discovery/hetzner/robot_test.go @@ -95,8 +95,7 @@ func TestRobotSDRefreshHandleError(t *testing.T) { require.NoError(t, err) targetGroups, err := d.refresh(context.Background()) - require.Error(t, err) - require.Equal(t, "non 2xx status '401' response during hetzner service discovery with role robot", err.Error()) + require.EqualError(t, err, "non 2xx status '401' response during hetzner service discovery with role robot") require.Empty(t, targetGroups) } diff --git a/discovery/openstack/hypervisor_test.go b/discovery/openstack/hypervisor_test.go index 45684b4a2..e4a97f32c 100644 --- a/discovery/openstack/hypervisor_test.go +++ b/discovery/openstack/hypervisor_test.go @@ -93,6 +93,5 @@ func TestOpenstackSDHypervisorRefreshWithDoneContext(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) cancel() _, err := hypervisor.refresh(ctx) - require.Error(t, err) - require.Contains(t, err.Error(), context.Canceled.Error(), "%q doesn't contain %q", err, context.Canceled) + require.ErrorContains(t, err, context.Canceled.Error(), "%q doesn't contain %q", err, context.Canceled) } diff --git a/discovery/openstack/instance_test.go 
b/discovery/openstack/instance_test.go index 2b5ac1b89..2617baa4e 100644 --- a/discovery/openstack/instance_test.go +++ b/discovery/openstack/instance_test.go @@ -134,6 +134,5 @@ func TestOpenstackSDInstanceRefreshWithDoneContext(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) cancel() _, err := hypervisor.refresh(ctx) - require.Error(t, err) - require.Contains(t, err.Error(), context.Canceled.Error(), "%q doesn't contain %q", err, context.Canceled) + require.ErrorContains(t, err, context.Canceled.Error(), "%q doesn't contain %q", err, context.Canceled) } diff --git a/discovery/triton/triton_test.go b/discovery/triton/triton_test.go index e37693e6b..b2d06afaf 100644 --- a/discovery/triton/triton_test.go +++ b/discovery/triton/triton_test.go @@ -21,7 +21,6 @@ import ( "net/http/httptest" "net/url" "strconv" - "strings" "testing" "github.com/prometheus/client_golang/prometheus" @@ -182,8 +181,7 @@ func TestTritonSDRefreshNoServer(t *testing.T) { td, m, _ := newTritonDiscovery(conf) _, err := td.refresh(context.Background()) - require.Error(t, err) - require.True(t, strings.Contains(err.Error(), "an error occurred when requesting targets from the discovery endpoint")) + require.ErrorContains(t, err, "an error occurred when requesting targets from the discovery endpoint") m.Unregister() } @@ -193,8 +191,7 @@ func TestTritonSDRefreshCancelled(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) cancel() _, err := td.refresh(ctx) - require.Error(t, err) - require.True(t, strings.Contains(err.Error(), context.Canceled.Error())) + require.ErrorContains(t, err, context.Canceled.Error()) m.Unregister() } diff --git a/discovery/xds/client_test.go b/discovery/xds/client_test.go index b699995fb..2cf5b2f9c 100644 --- a/discovery/xds/client_test.go +++ b/discovery/xds/client_test.go @@ -52,16 +52,14 @@ func TestMakeXDSResourceHttpEndpointEmptyServerURLScheme(t *testing.T) { endpointURL, err := makeXDSResourceHTTPEndpointURL(ProtocolV3, 
urlMustParse("127.0.0.1"), "monitoring") require.Empty(t, endpointURL) - require.Error(t, err) - require.Equal(t, "invalid xDS server URL", err.Error()) + require.EqualError(t, err, "invalid xDS server URL") } func TestMakeXDSResourceHttpEndpointEmptyServerURLHost(t *testing.T) { endpointURL, err := makeXDSResourceHTTPEndpointURL(ProtocolV3, urlMustParse("grpc://127.0.0.1"), "monitoring") require.Empty(t, endpointURL) - require.Error(t, err) - require.Contains(t, err.Error(), "must be either 'http' or 'https'") + require.ErrorContains(t, err, "must be either 'http' or 'https'") } func TestMakeXDSResourceHttpEndpoint(t *testing.T) { diff --git a/discovery/xds/kuma_test.go b/discovery/xds/kuma_test.go index cfb9cbac5..23d754c4b 100644 --- a/discovery/xds/kuma_test.go +++ b/discovery/xds/kuma_test.go @@ -201,9 +201,8 @@ func TestKumaMadsV1ResourceParserInvalidResources(t *testing.T) { }} groups, err := kumaMadsV1ResourceParser(resources, KumaMadsV1ResourceTypeURL) require.Nil(t, groups) - require.Error(t, err) - require.Contains(t, err.Error(), "cannot parse") + require.ErrorContains(t, err, "cannot parse") } func TestNewKumaHTTPDiscovery(t *testing.T) { diff --git a/model/rulefmt/rulefmt_test.go b/model/rulefmt/rulefmt_test.go index ef5008f4b..669f1da4e 100644 --- a/model/rulefmt/rulefmt_test.go +++ b/model/rulefmt/rulefmt_test.go @@ -85,9 +85,8 @@ func TestParseFileFailure(t *testing.T) { for _, c := range table { _, errs := ParseFile(filepath.Join("testdata", c.filename)) - require.NotNil(t, errs, "Expected error parsing %s but got none", c.filename) - require.Error(t, errs[0]) - require.Containsf(t, errs[0].Error(), c.errMsg, "Expected error for %s.", c.filename) + require.NotEmpty(t, errs, "Expected error parsing %s but got none", c.filename) + require.ErrorContainsf(t, errs[0], c.errMsg, "Expected error for %s.", c.filename) } } @@ -259,8 +258,7 @@ func TestError(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got := 
tt.error.Error() - require.Equal(t, tt.want, got) + require.EqualError(t, tt.error, tt.want) }) } } @@ -308,8 +306,7 @@ func TestWrappedError(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got := tt.wrappedError.Error() - require.Equal(t, tt.want, got) + require.EqualError(t, tt.wrappedError, tt.want) }) } } diff --git a/model/textparse/interface_test.go b/model/textparse/interface_test.go index 970b96706..e010cb36e 100644 --- a/model/textparse/interface_test.go +++ b/model/textparse/interface_test.go @@ -98,8 +98,7 @@ func TestNewParser(t *testing.T) { if tt.err == "" { require.NoError(t, err) } else { - require.Error(t, err) - require.Contains(t, err.Error(), tt.err) + require.ErrorContains(t, err, tt.err) } }) } diff --git a/model/textparse/openmetricsparse_test.go b/model/textparse/openmetricsparse_test.go index bbb7c0730..ea1f2a25f 100644 --- a/model/textparse/openmetricsparse_test.go +++ b/model/textparse/openmetricsparse_test.go @@ -804,7 +804,7 @@ func TestOpenMetricsParseErrors(t *testing.T) { for err == nil { _, err = p.Next() } - require.Equal(t, c.err, err.Error(), "test %d: %s", i, c.input) + require.EqualError(t, err, c.err, "test %d: %s", i, c.input) } } @@ -871,11 +871,11 @@ func TestOMNullByteHandling(t *testing.T) { } if c.err == "" { - require.Equal(t, io.EOF, err, "test %d", i) + require.ErrorIs(t, err, io.EOF, "test %d", i) continue } - require.Equal(t, c.err, err.Error(), "test %d", i) + require.EqualError(t, err, c.err, "test %d", i) } } diff --git a/model/textparse/promparse_test.go b/model/textparse/promparse_test.go index 4520dfe9a..e0337f8fd 100644 --- a/model/textparse/promparse_test.go +++ b/model/textparse/promparse_test.go @@ -423,8 +423,7 @@ func TestPromParseErrors(t *testing.T) { for err == nil { _, err = p.Next() } - require.Error(t, err) - require.Equal(t, c.err, err.Error(), "test %d", i) + require.EqualError(t, err, c.err, "test %d", i) } } @@ -483,8 +482,7 @@ func TestPromNullByteHandling(t 
*testing.T) { continue } - require.Error(t, err) - require.Equal(t, c.err, err.Error(), "test %d", i) + require.EqualError(t, err, c.err, "test %d", i) } } diff --git a/promql/fuzz_test.go b/promql/fuzz_test.go index 1f0bbaa66..4a26798de 100644 --- a/promql/fuzz_test.go +++ b/promql/fuzz_test.go @@ -29,7 +29,7 @@ func TestfuzzParseMetricWithContentTypePanicOnInvalid(t *testing.T) { } else { err, ok := p.(error) require.True(t, ok) - require.Contains(t, err.Error(), "duplicate parameter name") + require.ErrorContains(t, err, "duplicate parameter name") } }() diff --git a/promql/parser/parse_test.go b/promql/parser/parse_test.go index f50137b6d..b5096b777 100644 --- a/promql/parser/parse_test.go +++ b/promql/parser/parse_test.go @@ -3925,8 +3925,7 @@ func TestParseExpressions(t *testing.T) { require.Equal(t, expected, expr, "error on input '%s'", test.input) } else { - require.Error(t, err) - require.Contains(t, err.Error(), test.errMsg, "unexpected error on input '%s', expected '%s', got '%s'", test.input, test.errMsg, err.Error()) + require.ErrorContains(t, err, test.errMsg, "unexpected error on input '%s', expected '%s', got '%s'", test.input, test.errMsg, err.Error()) var errorList ParseErrors ok := errors.As(err, &errorList) @@ -4468,7 +4467,7 @@ func TestRecoverParserError(t *testing.T) { e := errors.New("custom error") defer func() { - require.Equal(t, e.Error(), err.Error()) + require.EqualError(t, err, e.Error()) }() defer p.recover(&err) diff --git a/scrape/scrape_test.go b/scrape/scrape_test.go index 57c51b2e9..f065ecebb 100644 --- a/scrape/scrape_test.go +++ b/scrape/scrape_test.go @@ -441,7 +441,7 @@ func TestScrapePoolTargetLimit(t *testing.T) { lerr := l.(*testLoop).getForcedError() if shouldErr { require.Error(t, lerr, "error was expected for %d targets with a limit of %d", targets, limit) - require.Equal(t, fmt.Sprintf("target_limit exceeded (number of targets: %d, limit: %d)", targets, limit), lerr.Error()) + require.EqualError(t, lerr, 
fmt.Sprintf("target_limit exceeded (number of targets: %d, limit: %d)", targets, limit)) } else { require.NoError(t, lerr) } @@ -2549,7 +2549,7 @@ func TestTargetScrapeScrapeNotFound(t *testing.T) { resp, err := ts.scrape(context.Background()) require.NoError(t, err) _, err = ts.readResponse(context.Background(), resp, io.Discard) - require.Contains(t, err.Error(), "404", "Expected \"404 NotFound\" error but got: %s", err) + require.ErrorContains(t, err, "404", "Expected \"404 NotFound\" error but got: %s", err) } func TestTargetScraperBodySizeLimit(t *testing.T) { diff --git a/storage/fanout_test.go b/storage/fanout_test.go index 4613fe757..3eef9e3cd 100644 --- a/storage/fanout_test.go +++ b/storage/fanout_test.go @@ -173,16 +173,13 @@ func TestFanoutErrors(t *testing.T) { } if tc.err != nil { - require.Error(t, ss.Err()) - require.Equal(t, tc.err.Error(), ss.Err().Error()) + require.EqualError(t, ss.Err(), tc.err.Error()) } if tc.warning != nil { - require.NotEmpty(t, ss.Warnings(), "warnings expected") w := ss.Warnings() - require.Error(t, w.AsErrors()[0]) - warn, _ := w.AsStrings("", 0, 0) - require.Equal(t, tc.warning.Error(), warn[0]) + require.NotEmpty(t, w, "warnings expected") + require.EqualError(t, w.AsErrors()[0], tc.warning.Error()) } }) t.Run("chunks", func(t *testing.T) { @@ -200,16 +197,13 @@ func TestFanoutErrors(t *testing.T) { } if tc.err != nil { - require.Error(t, ss.Err()) - require.Equal(t, tc.err.Error(), ss.Err().Error()) + require.EqualError(t, ss.Err(), tc.err.Error()) } if tc.warning != nil { - require.NotEmpty(t, ss.Warnings(), "warnings expected") w := ss.Warnings() - require.Error(t, w.AsErrors()[0]) - warn, _ := w.AsStrings("", 0, 0) - require.Equal(t, tc.warning.Error(), warn[0]) + require.NotEmpty(t, w, "warnings expected") + require.EqualError(t, w.AsErrors()[0], tc.warning.Error()) } }) } diff --git a/storage/remote/chunked_test.go b/storage/remote/chunked_test.go index 7c3993ca6..82ed86634 100644 --- 
a/storage/remote/chunked_test.go +++ b/storage/remote/chunked_test.go @@ -86,7 +86,7 @@ func TestChunkedReader_Overflow(t *testing.T) { _, err = NewChunkedReader(bytes.NewReader(b2), 11, nil).Next() require.Error(t, err, "expect exceed limit error") - require.Equal(t, "chunkedReader: message size exceeded the limit 11 bytes; got: 12 bytes", err.Error()) + require.EqualError(t, err, "chunkedReader: message size exceeded the limit 11 bytes; got: 12 bytes") } func TestChunkedReader_CorruptedFrame(t *testing.T) { @@ -102,5 +102,5 @@ func TestChunkedReader_CorruptedFrame(t *testing.T) { _, err = NewChunkedReader(bytes.NewReader(bs), 20, nil).Next() require.Error(t, err, "expected malformed frame") - require.Equal(t, "chunkedReader: corrupted frame; checksum mismatch", err.Error()) + require.EqualError(t, err, "chunkedReader: corrupted frame; checksum mismatch") } diff --git a/storage/remote/codec_test.go b/storage/remote/codec_test.go index 404f1add7..5b058d84e 100644 --- a/storage/remote/codec_test.go +++ b/storage/remote/codec_test.go @@ -253,8 +253,7 @@ func TestValidateLabelsAndMetricName(t *testing.T) { t.Run(test.description, func(t *testing.T) { err := validateLabelsAndMetricName(test.input) if test.expectedErr != "" { - require.Error(t, err) - require.Equal(t, test.expectedErr, err.Error()) + require.EqualError(t, err, test.expectedErr) } else { require.NoError(t, err) } @@ -551,7 +550,7 @@ func TestNegotiateResponseType(t *testing.T) { _, err = NegotiateResponseType([]prompb.ReadRequest_ResponseType{20}) require.Error(t, err, "expected error due to not supported requested response types") - require.Equal(t, "server does not support any of the requested response types: [20]; supported: map[SAMPLES:{} STREAMED_XOR_CHUNKS:{}]", err.Error()) + require.EqualError(t, err, "server does not support any of the requested response types: [20]; supported: map[SAMPLES:{} STREAMED_XOR_CHUNKS:{}]") } func TestMergeLabels(t *testing.T) { diff --git a/tsdb/block_test.go 
b/tsdb/block_test.go index b418a1382..bd86b2781 100644 --- a/tsdb/block_test.go +++ b/tsdb/block_test.go @@ -192,7 +192,7 @@ func TestCorruptedChunk(t *testing.T) { // Check open err. b, err := OpenBlock(nil, blockDir, nil) if tc.openErr != nil { - require.Equal(t, tc.openErr.Error(), err.Error()) + require.EqualError(t, err, tc.openErr.Error()) return } defer func() { require.NoError(t, b.Close()) }() @@ -206,7 +206,7 @@ func TestCorruptedChunk(t *testing.T) { require.True(t, set.Next()) it := set.At().Iterator(nil) require.Equal(t, chunkenc.ValNone, it.Next()) - require.Equal(t, tc.iterErr.Error(), it.Err().Error()) + require.EqualError(t, it.Err(), tc.iterErr.Error()) }) } } diff --git a/tsdb/compact_test.go b/tsdb/compact_test.go index 939e93350..d69b70d20 100644 --- a/tsdb/compact_test.go +++ b/tsdb/compact_test.go @@ -1045,8 +1045,7 @@ func TestCompaction_populateBlock(t *testing.T) { } err = blockPopulator.PopulateBlock(c.ctx, c.metrics, c.logger, c.chunkPool, c.mergeFunc, blocks, meta, iw, nopChunkWriter{}, irPostingsFunc) if tc.expErr != nil { - require.Error(t, err) - require.Equal(t, tc.expErr.Error(), err.Error()) + require.EqualError(t, err, tc.expErr.Error()) return } require.NoError(t, err) diff --git a/tsdb/head_test.go b/tsdb/head_test.go index 8c401bc6f..19dcc1f08 100644 --- a/tsdb/head_test.go +++ b/tsdb/head_test.go @@ -2414,8 +2414,7 @@ func TestAddDuplicateLabelName(t *testing.T) { add := func(labels labels.Labels, labelName string) { app := h.Appender(context.Background()) _, err := app.Append(0, labels, 0, 0) - require.Error(t, err) - require.Equal(t, fmt.Sprintf(`label name "%s" is not unique: invalid sample`, labelName), err.Error()) + require.EqualError(t, err, fmt.Sprintf(`label name "%s" is not unique: invalid sample`, labelName)) } add(labels.FromStrings("a", "c", "a", "b"), "a") diff --git a/web/api/v1/api_test.go b/web/api/v1/api_test.go index 8323e143c..334c41ce8 100644 --- a/web/api/v1/api_test.go +++ b/web/api/v1/api_test.go @@ 
-4176,7 +4176,7 @@ func TestExtractQueryOpts(t *testing.T) { if test.err == nil { require.NoError(t, err) } else { - require.Equal(t, test.err.Error(), err.Error()) + require.EqualError(t, err, test.err.Error()) } }) } From 5a54ae67edc4b140f14df25b3f08e438b5f846a6 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sun, 6 Oct 2024 20:01:57 +0200 Subject: [PATCH 73/74] Bump actions/checkout from 4.1.6 to 4.2.0 (#15064) Bumps [actions/checkout](https://github.com/actions/checkout) from 4.1.6 to 4.2.0. - [Release notes](https://github.com/actions/checkout/releases) - [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md) - [Commits](https://github.com/actions/checkout/compare/a5ac7e51b41094c92402da3b24376905380afc29...d632683dd7b4114ad314bca15554477dd762a938) --- updated-dependencies: - dependency-name: actions/checkout dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/buf-lint.yml | 2 +- .github/workflows/buf.yml | 2 +- .github/workflows/ci.yml | 26 ++++++++++----------- .github/workflows/codeql-analysis.yml | 2 +- .github/workflows/container_description.yml | 4 ++-- .github/workflows/repo_sync.yml | 2 +- .github/workflows/scorecards.yml | 2 +- 7 files changed, 20 insertions(+), 20 deletions(-) diff --git a/.github/workflows/buf-lint.yml b/.github/workflows/buf-lint.yml index 8f932b759..bf7f681b6 100644 --- a/.github/workflows/buf-lint.yml +++ b/.github/workflows/buf-lint.yml @@ -12,7 +12,7 @@ jobs: name: lint runs-on: ubuntu-latest steps: - - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 + - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 - uses: bufbuild/buf-setup-action@62ee92603c244ad0da98bab36a834a999a5329e6 # v1.43.0 with: github_token: ${{ secrets.GITHUB_TOKEN }} diff --git 
a/.github/workflows/buf.yml b/.github/workflows/buf.yml index 1b189926f..669305ebd 100644 --- a/.github/workflows/buf.yml +++ b/.github/workflows/buf.yml @@ -12,7 +12,7 @@ jobs: runs-on: ubuntu-latest if: github.repository_owner == 'prometheus' steps: - - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 + - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 - uses: bufbuild/buf-setup-action@62ee92603c244ad0da98bab36a834a999a5329e6 # v1.43.0 with: github_token: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 2ef0e97a1..2714211dd 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -13,7 +13,7 @@ jobs: # should also be updated. image: quay.io/prometheus/golang-builder:1.23-base steps: - - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 + - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 - uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4 - uses: ./.github/promci/actions/setup_environment with: @@ -29,7 +29,7 @@ jobs: container: image: quay.io/prometheus/golang-builder:1.23-base steps: - - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 + - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 - uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4 - uses: ./.github/promci/actions/setup_environment - run: go test --tags=dedupelabels ./... @@ -48,7 +48,7 @@ jobs: # The go version in this image should be N-1 wrt test_go. image: quay.io/prometheus/golang-builder:1.22-base steps: - - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 + - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 - run: make build # Don't run NPM build; don't run race-detector. 
- run: make test GO_ONLY=1 test-flags="" @@ -62,7 +62,7 @@ jobs: image: quay.io/prometheus/golang-builder:1.23-base steps: - - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 + - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 - uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4 - uses: ./.github/promci/actions/setup_environment with: @@ -79,7 +79,7 @@ jobs: name: Go tests on Windows runs-on: windows-latest steps: - - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 + - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 with: go-version: 1.23.x @@ -96,7 +96,7 @@ jobs: container: image: quay.io/prometheus/golang-builder:1.23-base steps: - - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 + - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 - run: go install ./cmd/promtool/. - run: go install github.com/google/go-jsonnet/cmd/jsonnet@latest - run: go install github.com/google/go-jsonnet/cmd/jsonnetfmt@latest @@ -121,7 +121,7 @@ jobs: matrix: thread: [ 0, 1, 2 ] steps: - - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 + - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 - uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4 - uses: ./.github/promci/actions/build with: @@ -146,7 +146,7 @@ jobs: # Whenever the Go version is updated here, .promu.yml # should also be updated. 
steps: - - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 + - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 - uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4 - uses: ./.github/promci/actions/build with: @@ -169,7 +169,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout repository - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 + uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 - name: Install Go uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 with: @@ -182,7 +182,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout repository - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 + uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 - name: Install Go uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 with: @@ -208,7 +208,7 @@ jobs: needs: [test_ui, test_go, test_go_more, test_go_oldest, test_windows, golangci, codeql, build_all] if: github.event_name == 'push' && github.event.ref == 'refs/heads/main' steps: - - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 + - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 - uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4 - uses: ./.github/promci/actions/publish_main with: @@ -225,7 +225,7 @@ jobs: || (github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v3.')) steps: - - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 + - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 - uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4 - uses: ./.github/promci/actions/publish_release with: @@ -240,7 +240,7 @@ jobs: needs: [test_ui, codeql] steps: - name: Checkout - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 + uses: 
actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 - uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4 - name: Install nodejs uses: actions/setup-node@0a44ba7841725637a19e28fa30b79a866c81b0a6 # v4.0.4 diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 1466f4ec2..77fbd4daf 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -24,7 +24,7 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 + uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 - name: Initialize CodeQL uses: github/codeql-action/init@e2b3eafc8d227b0241d48be5f425d47c2d750a13 # v3.26.10 diff --git a/.github/workflows/container_description.yml b/.github/workflows/container_description.yml index 8ddbc34ae..144859486 100644 --- a/.github/workflows/container_description.yml +++ b/.github/workflows/container_description.yml @@ -18,7 +18,7 @@ jobs: if: github.repository_owner == 'prometheus' || github.repository_owner == 'prometheus-community' # Don't run this workflow on forks. steps: - name: git checkout - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 + uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 - name: Set docker hub repo name run: echo "DOCKER_REPO_NAME=$(make docker-repo-name)" >> $GITHUB_ENV - name: Push README to Dockerhub @@ -40,7 +40,7 @@ jobs: if: github.repository_owner == 'prometheus' || github.repository_owner == 'prometheus-community' # Don't run this workflow on forks. 
steps: - name: git checkout - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 + uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 - name: Set quay.io org name run: echo "DOCKER_REPO=$(echo quay.io/${GITHUB_REPOSITORY_OWNER} | tr -d '-')" >> $GITHUB_ENV - name: Set quay.io repo name diff --git a/.github/workflows/repo_sync.yml b/.github/workflows/repo_sync.yml index 537e9abd8..aa306c46d 100644 --- a/.github/workflows/repo_sync.yml +++ b/.github/workflows/repo_sync.yml @@ -13,7 +13,7 @@ jobs: container: image: quay.io/prometheus/golang-builder steps: - - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 + - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 - run: ./scripts/sync_repo_files.sh env: GITHUB_TOKEN: ${{ secrets.PROMBOT_GITHUB_TOKEN }} diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index b5fbc7c94..c63727f7f 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -21,7 +21,7 @@ jobs: steps: - name: "Checkout code" - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # tag=v4.1.6 + uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # tag=v4.2.0 with: persist-credentials: false From 8d281c3491564738aab1f6735eea4542428d999f Mon Sep 17 00:00:00 2001 From: Bartlomiej Plotka Date: Mon, 7 Oct 2024 10:55:26 +0200 Subject: [PATCH 74/74] textparse: Refactored benchmark (#15083) * textparse: Refactored benchmark. Signed-off-by: bwplotka * Kill sinks, tested, nothing is inlined. Signed-off-by: bwplotka * Addressed comments. 
Signed-off-by: bwplotka --------- Signed-off-by: bwplotka --- model/textparse/benchmark_test.go | 177 ++++++++++++++++ model/textparse/openmetricsparse_test.go | 44 ---- model/textparse/promparse_test.go | 191 ------------------ model/textparse/protobufparse_test.go | 4 +- model/textparse/{ => testdata}/omtestdata.txt | 0 .../{ => testdata}/promtestdata.nometa.txt | 0 .../textparse/{ => testdata}/promtestdata.txt | 0 7 files changed, 180 insertions(+), 236 deletions(-) create mode 100644 model/textparse/benchmark_test.go rename model/textparse/{ => testdata}/omtestdata.txt (100%) rename model/textparse/{ => testdata}/promtestdata.nometa.txt (100%) rename model/textparse/{ => testdata}/promtestdata.txt (100%) diff --git a/model/textparse/benchmark_test.go b/model/textparse/benchmark_test.go new file mode 100644 index 000000000..3b8b8f305 --- /dev/null +++ b/model/textparse/benchmark_test.go @@ -0,0 +1,177 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package textparse + +import ( + "bytes" + "errors" + "fmt" + "io" + "os" + "path/filepath" + "testing" + + "github.com/prometheus/prometheus/model/exemplar" + "github.com/prometheus/prometheus/model/labels" + + "github.com/prometheus/common/expfmt" + "github.com/prometheus/common/model" + "github.com/stretchr/testify/require" +) + +type newParser func([]byte, *labels.SymbolTable) Parser + +var newTestParserFns = map[string]newParser{ + "promtext": NewPromParser, + "promproto": func(b []byte, st *labels.SymbolTable) Parser { + return NewProtobufParser(b, true, st) + }, + "omtext": func(b []byte, st *labels.SymbolTable) Parser { + return NewOpenMetricsParser(b, st, WithOMParserCTSeriesSkipped()) + }, +} + +// BenchmarkParse benchmarks parsing, mimicking how scrape/scrape.go#append use it. +// Typically used as follows: +/* + export bench=v1 && go test ./model/textparse/... \ + -run '^$' -bench '^BenchmarkParse' \ + -benchtime 2s -count 6 -cpu 2 -benchmem -timeout 999m \ + | tee ${bench}.txt +*/ +// For profiles, add -memprofile=${bench}.mem.pprof -cpuprofile=${bench}.cpu.pprof +// options. +// +// NOTE(bwplotka): Previous iterations of this benchmark had different cases for isolated +// Series, Series+Metrics with and without reuse, Series+CT. Those cases are sometimes +// good to know if you are working on a certain optimization, but it does not +// make sense to persist such cases for everybody (e.g. for CI one day). +// For local iteration, feel free to adjust cases/comment out code etc. +// +// NOTE(bwplotka): Do not try to conclude "what parser (OM, proto, prom) is the fastest" +// as the testdata has different amount and type of metrics and features (e.g. exemplars). +func BenchmarkParse(b *testing.B) { + for _, bcase := range []struct { + dataFile string // Localized to "./testdata". 
+ dataProto []byte + parser string + + compareToExpfmtFormat expfmt.FormatType + }{ + {dataFile: "promtestdata.txt", parser: "promtext", compareToExpfmtFormat: expfmt.TypeTextPlain}, + {dataFile: "promtestdata.nometa.txt", parser: "promtext", compareToExpfmtFormat: expfmt.TypeTextPlain}, + + // We don't pass compareToExpfmtFormat: expfmt.TypeProtoDelim as expfmt does not support GAUGE_HISTOGRAM, see https://github.com/prometheus/common/issues/430. + {dataProto: createTestProtoBuf(b).Bytes(), parser: "promproto"}, + + // We don't pass compareToExpfmtFormat: expfmt.TypeOpenMetrics as expfmt does not support OM exemplars, see https://github.com/prometheus/common/issues/703. + {dataFile: "omtestdata.txt", parser: "omtext"}, + {dataFile: "promtestdata.txt", parser: "omtext"}, // Compare how omtext parser deals with Prometheus text format vs promtext. + } { + var buf []byte + dataCase := bcase.dataFile + if len(bcase.dataProto) > 0 { + dataCase = "createTestProtoBuf()" + buf = bcase.dataProto + } else { + f, err := os.Open(filepath.Join("testdata", bcase.dataFile)) + require.NoError(b, err) + b.Cleanup(func() { + _ = f.Close() + }) + buf, err = io.ReadAll(f) + require.NoError(b, err) + } + b.Run(fmt.Sprintf("data=%v/parser=%v", dataCase, bcase.parser), func(b *testing.B) { + newParserFn := newTestParserFns[bcase.parser] + var ( + res labels.Labels + e exemplar.Exemplar + ) + + b.SetBytes(int64(len(buf))) + b.ReportAllocs() + b.ResetTimer() + + st := labels.NewSymbolTable() + for i := 0; i < b.N; i++ { + p := newParserFn(buf, st) + + Inner: + for { + t, err := p.Next() + switch t { + case EntryInvalid: + if errors.Is(err, io.EOF) { + break Inner + } + b.Fatal(err) + case EntryType: + _, _ = p.Type() + continue + case EntryHelp: + _, _ = p.Help() + continue + case EntryUnit: + _, _ = p.Unit() + continue + case EntryComment: + continue + case EntryHistogram: + _, _, _, _ = p.Histogram() + case EntrySeries: + _, _, _ = p.Series() + default: + b.Fatal("not implemented entry", 
t) + } + + _ = p.Metric(&res) + _ = p.CreatedTimestamp() + for hasExemplar := p.Exemplar(&e); hasExemplar; hasExemplar = p.Exemplar(&e) { + } + } + } + }) + + b.Run(fmt.Sprintf("data=%v/parser=xpfmt", dataCase), func(b *testing.B) { + if bcase.compareToExpfmtFormat == expfmt.TypeUnknown { + b.Skip("compareToExpfmtFormat not set") + } + + b.SetBytes(int64(len(buf))) + b.ReportAllocs() + b.ResetTimer() + + for i := 0; i < b.N; i++ { + decSamples := make(model.Vector, 0, 50) + sdec := expfmt.SampleDecoder{ + Dec: expfmt.NewDecoder(bytes.NewReader(buf), expfmt.NewFormat(bcase.compareToExpfmtFormat)), + Opts: &expfmt.DecodeOptions{ + Timestamp: model.TimeFromUnixNano(0), + }, + } + + for { + if err := sdec.Decode(&decSamples); err != nil { + if errors.Is(err, io.EOF) { + break + } + b.Fatal(err) + } + decSamples = decSamples[:0] + } + } + }) + } +} diff --git a/model/textparse/openmetricsparse_test.go b/model/textparse/openmetricsparse_test.go index ea1f2a25f..1d2e7feb0 100644 --- a/model/textparse/openmetricsparse_test.go +++ b/model/textparse/openmetricsparse_test.go @@ -16,7 +16,6 @@ package textparse import ( "errors" "io" - "os" "testing" "github.com/prometheus/common/model" @@ -958,46 +957,3 @@ thing_bucket{le="+Inf"} 17` i++ } } - -func BenchmarkOMParseCreatedTimestamp(b *testing.B) { - for parserName, parser := range map[string]func([]byte, *labels.SymbolTable) Parser{ - "openmetrics": func(b []byte, st *labels.SymbolTable) Parser { - return NewOpenMetricsParser(b, st) - }, - "openmetrics-skip-ct": func(b []byte, st *labels.SymbolTable) Parser { - return NewOpenMetricsParser(b, st, WithOMParserCTSeriesSkipped()) - }, - } { - f, err := os.Open("omtestdata.txt") - require.NoError(b, err) - defer f.Close() - - buf, err := io.ReadAll(f) - require.NoError(b, err) - - b.Run(parserName+"/parse-ct/"+"omtestdata.txt", func(b *testing.B) { - b.SetBytes(int64(len(buf) / promtestdataSampleCount)) - b.ReportAllocs() - b.ResetTimer() - - st := labels.NewSymbolTable() - for i 
:= 0; i < b.N; i += promtestdataSampleCount { - p := parser(buf, st) - - Outer: - for i < b.N { - t, err := p.Next() - switch t { - case EntryInvalid: - if errors.Is(err, io.EOF) { - break Outer - } - b.Fatal(err) - case EntrySeries: - p.CreatedTimestamp() - } - } - } - }) - } -} diff --git a/model/textparse/promparse_test.go b/model/textparse/promparse_test.go index e0337f8fd..e700b3527 100644 --- a/model/textparse/promparse_test.go +++ b/model/textparse/promparse_test.go @@ -14,17 +14,13 @@ package textparse import ( - "bytes" "errors" "io" - "os" "strings" "testing" - "github.com/klauspost/compress/gzip" "github.com/stretchr/testify/require" - "github.com/prometheus/common/expfmt" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/exemplar" @@ -485,190 +481,3 @@ func TestPromNullByteHandling(t *testing.T) { require.EqualError(t, err, c.err, "test %d", i) } } - -const ( - promtestdataSampleCount = 410 -) - -func BenchmarkPromParse(b *testing.B) { - for parserName, parser := range map[string]func([]byte, *labels.SymbolTable) Parser{ - "prometheus": NewPromParser, - "openmetrics": func(b []byte, st *labels.SymbolTable) Parser { - return NewOpenMetricsParser(b, st) - }, - } { - for _, fn := range []string{"promtestdata.txt", "promtestdata.nometa.txt"} { - f, err := os.Open(fn) - require.NoError(b, err) - defer f.Close() - - buf, err := io.ReadAll(f) - require.NoError(b, err) - - b.Run(parserName+"/no-decode-metric/"+fn, func(b *testing.B) { - total := 0 - - b.SetBytes(int64(len(buf) / promtestdataSampleCount)) - b.ReportAllocs() - b.ResetTimer() - - st := labels.NewSymbolTable() - for i := 0; i < b.N; i += promtestdataSampleCount { - p := parser(buf, st) - - Outer: - for i < b.N { - t, err := p.Next() - switch t { - case EntryInvalid: - if errors.Is(err, io.EOF) { - break Outer - } - b.Fatal(err) - case EntrySeries: - m, _, _ := p.Series() - total += len(m) - i++ - } - } - } - _ = total - }) - b.Run(parserName+"/decode-metric/"+fn, func(b 
*testing.B) { - total := 0 - - b.SetBytes(int64(len(buf) / promtestdataSampleCount)) - b.ReportAllocs() - b.ResetTimer() - - st := labels.NewSymbolTable() - for i := 0; i < b.N; i += promtestdataSampleCount { - p := parser(buf, st) - - Outer: - for i < b.N { - t, err := p.Next() - switch t { - case EntryInvalid: - if errors.Is(err, io.EOF) { - break Outer - } - b.Fatal(err) - case EntrySeries: - m, _, _ := p.Series() - - var res labels.Labels - p.Metric(&res) - - total += len(m) - i++ - } - } - } - _ = total - }) - b.Run(parserName+"/decode-metric-reuse/"+fn, func(b *testing.B) { - total := 0 - var res labels.Labels - - b.SetBytes(int64(len(buf) / promtestdataSampleCount)) - b.ReportAllocs() - b.ResetTimer() - - st := labels.NewSymbolTable() - for i := 0; i < b.N; i += promtestdataSampleCount { - p := parser(buf, st) - - Outer: - for i < b.N { - t, err := p.Next() - switch t { - case EntryInvalid: - if errors.Is(err, io.EOF) { - break Outer - } - b.Fatal(err) - case EntrySeries: - m, _, _ := p.Series() - - p.Metric(&res) - - total += len(m) - i++ - } - } - } - _ = total - }) - b.Run("expfmt-text/"+fn, func(b *testing.B) { - if parserName != "prometheus" { - b.Skip() - } - b.SetBytes(int64(len(buf) / promtestdataSampleCount)) - b.ReportAllocs() - b.ResetTimer() - - total := 0 - - for i := 0; i < b.N; i += promtestdataSampleCount { - decSamples := make(model.Vector, 0, 50) - sdec := expfmt.SampleDecoder{ - Dec: expfmt.NewDecoder(bytes.NewReader(buf), expfmt.NewFormat(expfmt.TypeTextPlain)), - Opts: &expfmt.DecodeOptions{ - Timestamp: model.TimeFromUnixNano(0), - }, - } - - for { - if err = sdec.Decode(&decSamples); err != nil { - break - } - total += len(decSamples) - decSamples = decSamples[:0] - } - } - _ = total - }) - } - } -} - -func BenchmarkGzip(b *testing.B) { - for _, fn := range []string{"promtestdata.txt", "promtestdata.nometa.txt"} { - b.Run(fn, func(b *testing.B) { - f, err := os.Open(fn) - require.NoError(b, err) - defer f.Close() - - var buf 
bytes.Buffer - gw := gzip.NewWriter(&buf) - - n, err := io.Copy(gw, f) - require.NoError(b, err) - require.NoError(b, gw.Close()) - - gbuf, err := io.ReadAll(&buf) - require.NoError(b, err) - - k := b.N / promtestdataSampleCount - - b.ReportAllocs() - b.SetBytes(n / promtestdataSampleCount) - b.ResetTimer() - - total := 0 - - for i := 0; i < k; i++ { - gr, err := gzip.NewReader(bytes.NewReader(gbuf)) - require.NoError(b, err) - - d, err := io.ReadAll(gr) - require.NoError(b, err) - require.NoError(b, gr.Close()) - - total += len(d) - } - _ = total - }) - } -} diff --git a/model/textparse/protobufparse_test.go b/model/textparse/protobufparse_test.go index cf34ae52d..01c6ac506 100644 --- a/model/textparse/protobufparse_test.go +++ b/model/textparse/protobufparse_test.go @@ -32,7 +32,9 @@ import ( dto "github.com/prometheus/prometheus/prompb/io/prometheus/client" ) -func createTestProtoBuf(t *testing.T) *bytes.Buffer { +func createTestProtoBuf(t testing.TB) *bytes.Buffer { + t.Helper() + testMetricFamilies := []string{ `name: "go_build_info" help: "Build information about the main Go module." diff --git a/model/textparse/omtestdata.txt b/model/textparse/testdata/omtestdata.txt similarity index 100% rename from model/textparse/omtestdata.txt rename to model/textparse/testdata/omtestdata.txt diff --git a/model/textparse/promtestdata.nometa.txt b/model/textparse/testdata/promtestdata.nometa.txt similarity index 100% rename from model/textparse/promtestdata.nometa.txt rename to model/textparse/testdata/promtestdata.nometa.txt diff --git a/model/textparse/promtestdata.txt b/model/textparse/testdata/promtestdata.txt similarity index 100% rename from model/textparse/promtestdata.txt rename to model/textparse/testdata/promtestdata.txt