From 20e3c295aea5992a4d8ea8e7e45ed9c9d6639813 Mon Sep 17 00:00:00 2001 From: "Xiaochao Dong (@damnever)" Date: Fri, 24 Mar 2023 17:18:24 +0800 Subject: [PATCH 001/137] Optimize constant label pair adding with relabel.Replace Signed-off-by: Xiaochao Dong (@damnever) --- model/relabel/relabel.go | 15 +++++++++ model/relabel/relabel_test.go | 60 +++++++++++++++++++++++++++++++++++ 2 files changed, 75 insertions(+) diff --git a/model/relabel/relabel.go b/model/relabel/relabel.go index d29c3d07ae..83b1a91474 100644 --- a/model/relabel/relabel.go +++ b/model/relabel/relabel.go @@ -267,6 +267,17 @@ func relabel(cfg *Config, lb *labels.Builder) (keep bool) { return false } case Replace: + // Fast path to add or delete label pair. + if val == "" && cfg.Regex == DefaultRelabelConfig.Regex && + !varInRegexTemplate(cfg.TargetLabel) && !varInRegexTemplate(cfg.Replacement) { + if !model.LabelName(cfg.TargetLabel).IsValid() || cfg.Replacement == "" { + lb.Del(cfg.TargetLabel) + } else { + lb.Set(cfg.TargetLabel, cfg.Replacement) + } + break + } + indexes := cfg.Regex.FindStringSubmatchIndex(val) // If there is no match no replacement must take place. 
if indexes == nil { @@ -316,3 +327,7 @@ func relabel(cfg *Config, lb *labels.Builder) (keep bool) { return true } + +func varInRegexTemplate(template string) bool { + return strings.Contains(template, "$") +} diff --git a/model/relabel/relabel_test.go b/model/relabel/relabel_test.go index 517b9b8223..d3815afe62 100644 --- a/model/relabel/relabel_test.go +++ b/model/relabel/relabel_test.go @@ -15,6 +15,7 @@ package relabel import ( "fmt" + "sort" "testing" "github.com/prometheus/common/model" @@ -850,3 +851,62 @@ func BenchmarkRelabel(b *testing.B) { }) } } + +func BenchmarkRelabel_ReplaceAddLabel(b *testing.B) { + cfgs := []*Config{} + for k, v := range map[string]string{ + "wwwwww": "wwwwww", + "xxxxxxxxx": "xxxxxxxxx", + "yyyyyyyyyyyy": "yyyyyyyyyyyy", + "${0}": "dropped", + "dropped": "${0}", + } { + cfgs = append(cfgs, &Config{ + Action: DefaultRelabelConfig.Action, + Separator: DefaultRelabelConfig.Separator, + Regex: DefaultRelabelConfig.Regex, + TargetLabel: k, + Replacement: v, + }) + } + expectLset := labels.Labels{ + labels.Label{Name: "abcdefg01", Value: "hijklmn1"}, + labels.Label{Name: "abcdefg02", Value: "hijklmn2"}, + labels.Label{Name: "abcdefg03", Value: "hijklmn3"}, + labels.Label{Name: "abcdefg04", Value: "hijklmn4"}, + labels.Label{Name: "abcdefg05", Value: "hijklmn5"}, + labels.Label{Name: "abcdefg06", Value: "hijklmn6"}, + labels.Label{Name: "abcdefg07", Value: "hijklmn7"}, + labels.Label{Name: "abcdefg08", Value: "hijklmn8"}, + labels.Label{Name: "abcdefg09", Value: "hijklmn9"}, + labels.Label{Name: "abcdefg10", Value: "hijklmn10"}, + labels.Label{Name: "abcdefg11", Value: "hijklmn11"}, + labels.Label{Name: "abcdefg12", Value: "hijklmn12"}, + labels.Label{Name: "abcdefg13", Value: "hijklmn13"}, + labels.Label{Name: "wwwwww", Value: "wwwwww"}, + labels.Label{Name: "xxxxxxxxx", Value: "xxxxxxxxx"}, + labels.Label{Name: "yyyyyyyyyyyy", Value: "yyyyyyyyyyyy"}, + } + sort.Sort(expectLset) + + for i := 0; i < b.N; i++ { + lset := labels.Labels{ + 
labels.Label{Name: "abcdefg01", Value: "hijklmn1"}, + labels.Label{Name: "abcdefg02", Value: "hijklmn2"}, + labels.Label{Name: "abcdefg03", Value: "hijklmn3"}, + labels.Label{Name: "abcdefg04", Value: "hijklmn4"}, + labels.Label{Name: "abcdefg05", Value: "hijklmn5"}, + labels.Label{Name: "abcdefg06", Value: "hijklmn6"}, + labels.Label{Name: "abcdefg07", Value: "hijklmn7"}, + labels.Label{Name: "abcdefg08", Value: "hijklmn8"}, + labels.Label{Name: "abcdefg09", Value: "hijklmn9"}, + labels.Label{Name: "abcdefg10", Value: "hijklmn10"}, + labels.Label{Name: "abcdefg11", Value: "hijklmn11"}, + labels.Label{Name: "abcdefg12", Value: "hijklmn12"}, + labels.Label{Name: "abcdefg13", Value: "hijklmn13"}, + } + actual, _ := Process(lset, cfgs...) + var _ = actual + // require.Equal(b, actual, expectLset) + } +} From 2d0d3333712e596ec2f1f4e4596fdb7ae0466e6f Mon Sep 17 00:00:00 2001 From: "Xiaochao Dong (@damnever)" Date: Sat, 25 Mar 2023 10:42:20 +0800 Subject: [PATCH 002/137] Fix lint issue Signed-off-by: Xiaochao Dong (@damnever) --- model/relabel/relabel_test.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/model/relabel/relabel_test.go b/model/relabel/relabel_test.go index d3815afe62..bd51b9771e 100644 --- a/model/relabel/relabel_test.go +++ b/model/relabel/relabel_test.go @@ -905,8 +905,7 @@ func BenchmarkRelabel_ReplaceAddLabel(b *testing.B) { labels.Label{Name: "abcdefg12", Value: "hijklmn12"}, labels.Label{Name: "abcdefg13", Value: "hijklmn13"}, } - actual, _ := Process(lset, cfgs...) - var _ = actual + _, _ = Process(lset, cfgs...) 
// require.Equal(b, actual, expectLset) } } From 1601b2a79e0116f9d3a3e30915a6899e73c96feb Mon Sep 17 00:00:00 2001 From: "Xiaochao Dong (@damnever)" Date: Wed, 29 Mar 2023 11:20:59 +0800 Subject: [PATCH 003/137] check new line in target Signed-off-by: Xiaochao Dong (@damnever) --- model/relabel/relabel.go | 5 +++++ model/relabel/relabel_test.go | 6 ++++-- 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/model/relabel/relabel.go b/model/relabel/relabel.go index 83b1a91474..7607138b5c 100644 --- a/model/relabel/relabel.go +++ b/model/relabel/relabel.go @@ -269,6 +269,7 @@ func relabel(cfg *Config, lb *labels.Builder) (keep bool) { case Replace: // Fast path to add or delete label pair. if val == "" && cfg.Regex == DefaultRelabelConfig.Regex && + !containsNewLine(cfg.TargetLabel) && !varInRegexTemplate(cfg.TargetLabel) && !varInRegexTemplate(cfg.Replacement) { if !model.LabelName(cfg.TargetLabel).IsValid() || cfg.Replacement == "" { lb.Del(cfg.TargetLabel) @@ -331,3 +332,7 @@ func relabel(cfg *Config, lb *labels.Builder) (keep bool) { func varInRegexTemplate(template string) bool { return strings.Contains(template, "$") } + +func containsNewLine(s string) bool { + return strings.Contains(s, "\r\n") || strings.Contains(s, "\n") +} diff --git a/model/relabel/relabel_test.go b/model/relabel/relabel_test.go index bd51b9771e..86844cf280 100644 --- a/model/relabel/relabel_test.go +++ b/model/relabel/relabel_test.go @@ -858,6 +858,8 @@ func BenchmarkRelabel_ReplaceAddLabel(b *testing.B) { "wwwwww": "wwwwww", "xxxxxxxxx": "xxxxxxxxx", "yyyyyyyyyyyy": "yyyyyyyyyyyy", + "new\nline1": "dropped", + "new\r\nline2": "dropped", "${0}": "dropped", "dropped": "${0}", } { @@ -905,7 +907,7 @@ func BenchmarkRelabel_ReplaceAddLabel(b *testing.B) { labels.Label{Name: "abcdefg12", Value: "hijklmn12"}, labels.Label{Name: "abcdefg13", Value: "hijklmn13"}, } - _, _ = Process(lset, cfgs...) - // require.Equal(b, actual, expectLset) + actual, _ := Process(lset, cfgs...) 
+ require.Equal(b, actual, expectLset) } } From b3b5c0022e5c05088733519f104db2c8b5c54974 Mon Sep 17 00:00:00 2001 From: "Xiaochao Dong (@damnever)" Date: Mon, 25 Dec 2023 15:14:25 +0800 Subject: [PATCH 004/137] Simplify the logic as per the comments Signed-off-by: Xiaochao Dong (@damnever) --- model/relabel/relabel.go | 11 +---- model/relabel/relabel_test.go | 89 +++++++++++------------------------ 2 files changed, 29 insertions(+), 71 deletions(-) diff --git a/model/relabel/relabel.go b/model/relabel/relabel.go index 7607138b5c..d169ed2f22 100644 --- a/model/relabel/relabel.go +++ b/model/relabel/relabel.go @@ -269,13 +269,8 @@ func relabel(cfg *Config, lb *labels.Builder) (keep bool) { case Replace: // Fast path to add or delete label pair. if val == "" && cfg.Regex == DefaultRelabelConfig.Regex && - !containsNewLine(cfg.TargetLabel) && !varInRegexTemplate(cfg.TargetLabel) && !varInRegexTemplate(cfg.Replacement) { - if !model.LabelName(cfg.TargetLabel).IsValid() || cfg.Replacement == "" { - lb.Del(cfg.TargetLabel) - } else { - lb.Set(cfg.TargetLabel, cfg.Replacement) - } + lb.Set(cfg.TargetLabel, cfg.Replacement) break } @@ -332,7 +327,3 @@ func relabel(cfg *Config, lb *labels.Builder) (keep bool) { func varInRegexTemplate(template string) bool { return strings.Contains(template, "$") } - -func containsNewLine(s string) bool { - return strings.Contains(s, "\r\n") || strings.Contains(s, "\n") -} diff --git a/model/relabel/relabel_test.go b/model/relabel/relabel_test.go index 86844cf280..7652798f55 100644 --- a/model/relabel/relabel_test.go +++ b/model/relabel/relabel_test.go @@ -15,7 +15,6 @@ package relabel import ( "fmt" - "sort" "testing" "github.com/prometheus/common/model" @@ -838,6 +837,34 @@ func BenchmarkRelabel(b *testing.B) { "__scrape_timeout__", "10s", "job", "kubernetes-pods"), }, + { + name: "static label pair", + config: ` + - replacement: wwwwww + target_label: wwwwww + - replacement: yyyyyyyyyyyy + target_label: xxxxxxxxx + - replacement: 
xxxxxxxxx + target_label: yyyyyyyyyyyy + - source_labels: ["something"] + target_label: with_source_labels + replacement: value + - replacement: dropped + target_label: ${0} + - replacement: ${0} + target_label: dropped`, + lbls: labels.FromStrings( + "abcdefg01", "hijklmn1", + "abcdefg02", "hijklmn2", + "abcdefg03", "hijklmn3", + "abcdefg04", "hijklmn4", + "abcdefg05", "hijklmn5", + "abcdefg06", "hijklmn6", + "abcdefg07", "hijklmn7", + "abcdefg08", "hijklmn8", + "job", "foo", + ), + }, } for i := range tests { err := yaml.UnmarshalStrict([]byte(tests[i].config), &tests[i].cfgs) @@ -851,63 +878,3 @@ func BenchmarkRelabel(b *testing.B) { }) } } - -func BenchmarkRelabel_ReplaceAddLabel(b *testing.B) { - cfgs := []*Config{} - for k, v := range map[string]string{ - "wwwwww": "wwwwww", - "xxxxxxxxx": "xxxxxxxxx", - "yyyyyyyyyyyy": "yyyyyyyyyyyy", - "new\nline1": "dropped", - "new\r\nline2": "dropped", - "${0}": "dropped", - "dropped": "${0}", - } { - cfgs = append(cfgs, &Config{ - Action: DefaultRelabelConfig.Action, - Separator: DefaultRelabelConfig.Separator, - Regex: DefaultRelabelConfig.Regex, - TargetLabel: k, - Replacement: v, - }) - } - expectLset := labels.Labels{ - labels.Label{Name: "abcdefg01", Value: "hijklmn1"}, - labels.Label{Name: "abcdefg02", Value: "hijklmn2"}, - labels.Label{Name: "abcdefg03", Value: "hijklmn3"}, - labels.Label{Name: "abcdefg04", Value: "hijklmn4"}, - labels.Label{Name: "abcdefg05", Value: "hijklmn5"}, - labels.Label{Name: "abcdefg06", Value: "hijklmn6"}, - labels.Label{Name: "abcdefg07", Value: "hijklmn7"}, - labels.Label{Name: "abcdefg08", Value: "hijklmn8"}, - labels.Label{Name: "abcdefg09", Value: "hijklmn9"}, - labels.Label{Name: "abcdefg10", Value: "hijklmn10"}, - labels.Label{Name: "abcdefg11", Value: "hijklmn11"}, - labels.Label{Name: "abcdefg12", Value: "hijklmn12"}, - labels.Label{Name: "abcdefg13", Value: "hijklmn13"}, - labels.Label{Name: "wwwwww", Value: "wwwwww"}, - labels.Label{Name: "xxxxxxxxx", Value: "xxxxxxxxx"}, - 
labels.Label{Name: "yyyyyyyyyyyy", Value: "yyyyyyyyyyyy"}, - } - sort.Sort(expectLset) - - for i := 0; i < b.N; i++ { - lset := labels.Labels{ - labels.Label{Name: "abcdefg01", Value: "hijklmn1"}, - labels.Label{Name: "abcdefg02", Value: "hijklmn2"}, - labels.Label{Name: "abcdefg03", Value: "hijklmn3"}, - labels.Label{Name: "abcdefg04", Value: "hijklmn4"}, - labels.Label{Name: "abcdefg05", Value: "hijklmn5"}, - labels.Label{Name: "abcdefg06", Value: "hijklmn6"}, - labels.Label{Name: "abcdefg07", Value: "hijklmn7"}, - labels.Label{Name: "abcdefg08", Value: "hijklmn8"}, - labels.Label{Name: "abcdefg09", Value: "hijklmn9"}, - labels.Label{Name: "abcdefg10", Value: "hijklmn10"}, - labels.Label{Name: "abcdefg11", Value: "hijklmn11"}, - labels.Label{Name: "abcdefg12", Value: "hijklmn12"}, - labels.Label{Name: "abcdefg13", Value: "hijklmn13"}, - } - actual, _ := Process(lset, cfgs...) - require.Equal(b, actual, expectLset) - } -} From aa3e58358b9ea5316dcdfdf69b607f56966efba2 Mon Sep 17 00:00:00 2001 From: Daniel Kimsey Date: Tue, 15 Nov 2022 15:30:12 -0600 Subject: [PATCH 005/137] consul: Add support for catalog list services filter This adds support for Consul's Catalog [List Services][^1] API's `filter` parameter added in 1.14.x. This parameter grants the operator more flexibility to do server-side filtering of the Catalog, before Prometheus subscribes for updates. Operators can use this to improve both the performance of Prometheus's Consul SD and reduce the impact of enumerating large catalogs. 
[^1]: https://developer.hashicorp.com/consul/api-docs/v1.14.x/catalog Signed-off-by: Daniel Kimsey --- discovery/consul/consul.go | 10 ++++++++-- discovery/consul/consul_test.go | 23 +++++++++++++++++++++++ docs/configuration/configuration.md | 9 ++++++--- 3 files changed, 37 insertions(+), 5 deletions(-) diff --git a/discovery/consul/consul.go b/discovery/consul/consul.go index 40eed7697a..d75ad6a1b0 100644 --- a/discovery/consul/consul.go +++ b/discovery/consul/consul.go @@ -113,8 +113,11 @@ type SDConfig struct { Services []string `yaml:"services,omitempty"` // A list of tags used to filter instances inside a service. Services must contain all tags in the list. ServiceTags []string `yaml:"tags,omitempty"` - // Desired node metadata. + // Desired node metadata. As of Consul 1.14, consider `filter` instead. NodeMeta map[string]string `yaml:"node_meta,omitempty"` + // Consul filter string + // See https://www.consul.io/api-docs/catalog#filtering-1, for syntax + Filter string `yaml:"filter,omitempty"` HTTPClientConfig config.HTTPClientConfig `yaml:",inline"` } @@ -174,6 +177,7 @@ type Discovery struct { watchedServices []string // Set of services which will be discovered. watchedTags []string // Tags used to filter instances of a service. watchedNodeMeta map[string]string + watchedFilter string allowStale bool refreshInterval time.Duration finalizer func() @@ -218,6 +222,7 @@ func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.Discovere watchedServices: conf.Services, watchedTags: conf.ServiceTags, watchedNodeMeta: conf.NodeMeta, + watchedFilter: conf.Filter, allowStale: conf.AllowStale, refreshInterval: time.Duration(conf.RefreshInterval), clientDatacenter: conf.Datacenter, @@ -361,13 +366,14 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { // entire list of services. 
func (d *Discovery) watchServices(ctx context.Context, ch chan<- []*targetgroup.Group, lastIndex *uint64, services map[string]func()) { catalog := d.client.Catalog() - level.Debug(d.logger).Log("msg", "Watching services", "tags", strings.Join(d.watchedTags, ",")) + level.Debug(d.logger).Log("msg", "Watching services", "tags", strings.Join(d.watchedTags, ","), "filter", d.watchedFilter) opts := &consul.QueryOptions{ WaitIndex: *lastIndex, WaitTime: watchTimeout, AllowStale: d.allowStale, NodeMeta: d.watchedNodeMeta, + Filter: d.watchedFilter, } t0 := time.Now() srvs, meta, err := catalog.Services(opts.WithContext(ctx)) diff --git a/discovery/consul/consul_test.go b/discovery/consul/consul_test.go index e3bc7938f5..6418956411 100644 --- a/discovery/consul/consul_test.go +++ b/discovery/consul/consul_test.go @@ -252,6 +252,8 @@ func newServer(t *testing.T) (*httptest.Server, *SDConfig) { case "/v1/catalog/services?index=1&wait=120000ms": time.Sleep(5 * time.Second) response = ServicesTestAnswer + case "/v1/catalog/services?filter=NodeMeta.rack_name+%3D%3D+%222304%22&index=1&wait=120000ms": + response = ServicesTestAnswer default: t.Errorf("Unhandled consul call: %s", r.URL) } @@ -369,6 +371,27 @@ func TestAllOptions(t *testing.T) { <-ch } +// Watch the test service with a specific tag and node-meta via Filter parameter. 
+func TestFilterOption(t *testing.T) { + stub, config := newServer(t) + defer stub.Close() + + config.Services = []string{"test"} + config.Filter = `NodeMeta.rack_name == "2304"` + config.Token = "fake-token" + + d := newDiscovery(t, config) + + ctx, cancel := context.WithCancel(context.Background()) + ch := make(chan []*targetgroup.Group) + go func() { + d.Run(ctx, ch) + close(ch) + }() + checkOneTarget(t, <-ch) + cancel() +} + func TestGetDatacenterShouldReturnError(t *testing.T) { for _, tc := range []struct { handler func(http.ResponseWriter, *http.Request) diff --git a/docs/configuration/configuration.md b/docs/configuration/configuration.md index d751a4084e..24b913f70d 100644 --- a/docs/configuration/configuration.md +++ b/docs/configuration/configuration.md @@ -716,14 +716,17 @@ The following meta labels are available on targets during [relabeling](#relabel_ services: [ - ] -# See https://www.consul.io/api/catalog.html#list-nodes-for-service to know more -# about the possible filters that can be used. +# A Consul Filter expression used to filter the catalog results +# See https://www.consul.io/api-docs/catalog#list-services to know more +# about the filter expressions that can be used. +[ filter: ] +# The `tags` and `node_meta` fields are deprecated in Consul in favor of `filter`. # An optional list of tags used to filter nodes for a given service. Services must contain all tags in the list. tags: [ - ] -# Node metadata key/value pairs to filter nodes for a given service. +# Node metadata key/value pairs to filter nodes for a given service. As of Consul 1.14, consider `filter` instead. [ node_meta: [ : ... ] ] From 33adbe47b14736bec9811eda9709fc4d6365d622 Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Fri, 23 Aug 2024 09:30:22 +0100 Subject: [PATCH 006/137] [PERF] TSDB: Grow postings by doubling Go's built-in append() grows larger slices with factor 1.3, which means we do a lot more allocating and copying for larger postings. 
Signed-off-by: Bryan Boreham --- tsdb/index/postings.go | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/tsdb/index/postings.go b/tsdb/index/postings.go index bfe74c323d..5ed41f7698 100644 --- a/tsdb/index/postings.go +++ b/tsdb/index/postings.go @@ -345,13 +345,22 @@ func (p *MemPostings) Add(id storage.SeriesRef, lset labels.Labels) { p.mtx.Unlock() } +func appendWithExponentialGrowth[T any](a []T, v T) []T { + if cap(a) < len(a)+1 { + newList := make([]T, len(a), len(a)*2+1) + copy(newList, a) + a = newList + } + return append(a, v) +} + func (p *MemPostings) addFor(id storage.SeriesRef, l labels.Label) { nm, ok := p.m[l.Name] if !ok { nm = map[string][]storage.SeriesRef{} p.m[l.Name] = nm } - list := append(nm[l.Value], id) + list := appendWithExponentialGrowth(nm[l.Value], id) nm[l.Value] = list if !p.ordered { From e67358d203864018ecbbe8c74c1cb3af3be4b2b4 Mon Sep 17 00:00:00 2001 From: Charles Korn Date: Wed, 4 Sep 2024 15:39:05 +1000 Subject: [PATCH 007/137] histogram: include counter reset hint in test expression output Signed-off-by: Charles Korn --- model/histogram/float_histogram.go | 11 +++++++++++ promql/parser/parse_test.go | 16 ++++++++++++++++ 2 files changed, 27 insertions(+) diff --git a/model/histogram/float_histogram.go b/model/histogram/float_histogram.go index 2a37ea66d4..1777afdbf1 100644 --- a/model/histogram/float_histogram.go +++ b/model/histogram/float_histogram.go @@ -230,6 +230,17 @@ func (h *FloatHistogram) TestExpression() string { res = append(res, fmt.Sprintf("custom_values:%g", m.CustomValues)) } + switch m.CounterResetHint { + case UnknownCounterReset: + // Unknown is the default, don't add anything. 
+ case CounterReset: + res = append(res, fmt.Sprintf("counter_reset_hint:reset")) + case NotCounterReset: + res = append(res, fmt.Sprintf("counter_reset_hint:not_reset")) + case GaugeType: + res = append(res, fmt.Sprintf("counter_reset_hint:gauge")) + } + addBuckets := func(kind, bucketsKey, offsetKey string, buckets []float64, spans []Span) []string { if len(spans) > 1 { panic(fmt.Sprintf("histogram with multiple %s spans not supported", kind)) diff --git a/promql/parser/parse_test.go b/promql/parser/parse_test.go index 37748323ce..40e6809183 100644 --- a/promql/parser/parse_test.go +++ b/promql/parser/parse_test.go @@ -4385,6 +4385,22 @@ func TestHistogramTestExpression(t *testing.T) { }, expected: `{{offset:-3 buckets:[5.1 0 0 0 0 10 7] n_offset:-1 n_buckets:[4.1 5 0 0 7 8 9]}}`, }, + { + name: "known counter reset hint", + input: histogram.FloatHistogram{ + Schema: 1, + Sum: -0.3, + Count: 3.1, + ZeroCount: 7.1, + ZeroThreshold: 0.05, + PositiveBuckets: []float64{5.1, 10, 7}, + PositiveSpans: []histogram.Span{{Offset: -3, Length: 3}}, + NegativeBuckets: []float64{4.1, 5}, + NegativeSpans: []histogram.Span{{Offset: -5, Length: 2}}, + CounterResetHint: histogram.CounterReset, + }, + expected: `{{schema:1 count:3.1 sum:-0.3 z_bucket:7.1 z_bucket_w:0.05 counter_reset_hint:reset offset:-3 buckets:[5.1 10 7] n_offset:-5 n_buckets:[4.1 5]}}`, + }, } { t.Run(test.name, func(t *testing.T) { expression := test.input.TestExpression() From 90dc1b45dbb448f6ce0ff9349dcd06e76db4f525 Mon Sep 17 00:00:00 2001 From: Charles Korn Date: Wed, 4 Sep 2024 15:47:07 +1000 Subject: [PATCH 008/137] promqltest: use test expression format for histograms in assertion failure messages Signed-off-by: Charles Korn --- promql/promqltest/test.go | 10 ++++++++-- promql/promqltest/test_test.go | 6 +++--- 2 files changed, 11 insertions(+), 5 deletions(-) diff --git a/promql/promqltest/test.go b/promql/promqltest/test.go index 065e52e33f..bab8388622 100644 --- a/promql/promqltest/test.go +++ 
b/promql/promqltest/test.go @@ -779,7 +779,7 @@ func (ev *evalCmd) compareResult(result parser.Value) error { } if !compareNativeHistogram(expected.H.Compact(0), actual.H.Compact(0)) { - return fmt.Errorf("expected histogram value at index %v (t=%v) for %s to be %v, but got %v (result has %s)", i, actual.T, ev.metrics[hash], expected.H, actual.H, formatSeriesResult(s)) + return fmt.Errorf("expected histogram value at index %v (t=%v) for %s to be %v, but got %v (result has %s)", i, actual.T, ev.metrics[hash], expected.H.TestExpression(), actual.H.TestExpression(), formatSeriesResult(s)) } } } @@ -995,7 +995,13 @@ func formatSeriesResult(s promql.Series) string { histogramPlural = "" } - return fmt.Sprintf("%v float point%s %v and %v histogram point%s %v", len(s.Floats), floatPlural, s.Floats, len(s.Histograms), histogramPlural, s.Histograms) + histograms := make([]string, 0, len(s.Histograms)) + + for _, p := range s.Histograms { + histograms = append(histograms, fmt.Sprintf("%v @[%v]", p.H.TestExpression(), p.T)) + } + + return fmt.Sprintf("%v float point%s %v and %v histogram point%s %v", len(s.Floats), floatPlural, s.Floats, len(s.Histograms), histogramPlural, histograms) } // HistogramTestExpression returns TestExpression() for the given histogram or "" if the histogram is nil. 
diff --git a/promql/promqltest/test_test.go b/promql/promqltest/test_test.go index 49b43eb126..bd965b00b5 100644 --- a/promql/promqltest/test_test.go +++ b/promql/promqltest/test_test.go @@ -381,7 +381,7 @@ load 5m eval range from 0 to 10m step 5m testmetric testmetric {{schema:-1 sum:4 count:1 buckets:[1] offset:1}} {{schema:-1 sum:7 count:1 buckets:[1] offset:1}} {{schema:-1 sum:8 count:1 buckets:[1] offset:1}} `, - expectedError: `error in eval testmetric (line 5): expected histogram value at index 1 (t=300000) for {__name__="testmetric"} to be {count:1, sum:7, (1,4]:1}, but got {count:1, sum:5, (1,4]:1} (result has 0 float points [] and 3 histogram points [{count:1, sum:4, (1,4]:1} @[0] {count:1, sum:5, (1,4]:1} @[300000] {count:1, sum:6, (1,4]:1} @[600000]])`, + expectedError: `error in eval testmetric (line 5): expected histogram value at index 1 (t=300000) for {__name__="testmetric"} to be {{schema:-1 count:1 sum:7 offset:1 buckets:[1]}}, but got {{schema:-1 count:1 sum:5 counter_reset_hint:not_reset offset:1 buckets:[1]}} (result has 0 float points [] and 3 histogram points [{{schema:-1 count:1 sum:4 offset:1 buckets:[1]}} @[0] {{schema:-1 count:1 sum:5 counter_reset_hint:not_reset offset:1 buckets:[1]}} @[300000] {{schema:-1 count:1 sum:6 counter_reset_hint:not_reset offset:1 buckets:[1]}} @[600000]])`, }, "range query with too many points for query time range": { input: testData + ` @@ -532,7 +532,7 @@ load 5m eval range from 0 to 5m step 5m testmetric testmetric 2 3 `, - expectedError: `error in eval testmetric (line 5): expected 2 float points and 0 histogram points for {__name__="testmetric"}, but got 0 float points [] and 2 histogram points [{count:0, sum:0} @[0] {count:0, sum:0} @[300000]]`, + expectedError: `error in eval testmetric (line 5): expected 2 float points and 0 histogram points for {__name__="testmetric"}, but got 0 float points [] and 2 histogram points [{{}} @[0] {{counter_reset_hint:not_reset}} @[300000]]`, }, "range query with 
expected mixed results": { input: ` @@ -552,7 +552,7 @@ load 5m eval range from 0 to 5m step 5m testmetric testmetric {{}} 3 `, - expectedError: `error in eval testmetric (line 5): expected float value at index 0 for {__name__="testmetric"} to have timestamp 300000, but it had timestamp 0 (result has 1 float point [3 @[0]] and 1 histogram point [{count:0, sum:0} @[300000]])`, + expectedError: `error in eval testmetric (line 5): expected float value at index 0 for {__name__="testmetric"} to have timestamp 300000, but it had timestamp 0 (result has 1 float point [3 @[0]] and 1 histogram point [{{}} @[300000]])`, }, "instant query with expected scalar result": { input: ` From 4da551578c691591d54bddf38fd8b1620c5faa73 Mon Sep 17 00:00:00 2001 From: Charles Korn Date: Wed, 4 Sep 2024 16:33:18 +1000 Subject: [PATCH 009/137] Fix test broken by inclusion of `counter_reset_hint` Signed-off-by: Charles Korn --- cmd/promtool/testdata/unittest.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cmd/promtool/testdata/unittest.yml b/cmd/promtool/testdata/unittest.yml index ff511729ba..d6224d785f 100644 --- a/cmd/promtool/testdata/unittest.yml +++ b/cmd/promtool/testdata/unittest.yml @@ -69,13 +69,13 @@ tests: eval_time: 2m exp_samples: - labels: "test_histogram_repeat" - histogram: "{{count:2 sum:3 buckets:[2]}}" + histogram: "{{count:2 sum:3 counter_reset_hint:not_reset buckets:[2]}}" - expr: test_histogram_increase eval_time: 2m exp_samples: - labels: "test_histogram_increase" - histogram: "{{count:4 sum:5.6 buckets:[4]}}" + histogram: "{{count:4 sum:5.6 counter_reset_hint:not_reset buckets:[4]}}" # Ensure a value is stale as soon as it is marked as such. - expr: test_stale From 58149206014faeb3ce0e0e7c4981751992e764ed Mon Sep 17 00:00:00 2001 From: Mario Fernandez Date: Wed, 24 Jul 2024 11:49:10 +0200 Subject: [PATCH 010/137] Fix: optimize .* regexp performance Shortcut for `.*` matches newlines as well. 
Add preamble change ^(?s: Add test dotAll flag por al regex Add and fix regex tests Signed-off-by: Mario Fernandez --- model/labels/regexp.go | 4 +- model/labels/regexp_test.go | 116 +++++++++++++++++----------------- model/relabel/relabel.go | 6 +- model/relabel/relabel_test.go | 23 +++++++ promql/functions.go | 2 +- tsdb/querier.go | 4 ++ tsdb/querier_test.go | 65 +++++++++++++++++++ 7 files changed, 156 insertions(+), 64 deletions(-) diff --git a/model/labels/regexp.go b/model/labels/regexp.go index d2151d83dd..3df9435194 100644 --- a/model/labels/regexp.go +++ b/model/labels/regexp.go @@ -63,13 +63,13 @@ func NewFastRegexMatcher(v string) (*FastRegexMatcher, error) { // available, even if the string matcher is faster. m.matchString = m.stringMatcher.Matches } else { - parsed, err := syntax.Parse(v, syntax.Perl) + parsed, err := syntax.Parse(v, syntax.Perl|syntax.DotNL) if err != nil { return nil, err } // Simplify the syntax tree to run faster. parsed = parsed.Simplify() - m.re, err = regexp.Compile("^(?:" + parsed.String() + ")$") + m.re, err = regexp.Compile("^(?s:" + parsed.String() + ")$") if err != nil { return nil, err } diff --git a/model/labels/regexp_test.go b/model/labels/regexp_test.go index 24875e64ef..8df0dbb023 100644 --- a/model/labels/regexp_test.go +++ b/model/labels/regexp_test.go @@ -121,7 +121,7 @@ func TestFastRegexMatcher_MatchString(t *testing.T) { t.Parallel() m, err := NewFastRegexMatcher(r) require.NoError(t, err) - re := regexp.MustCompile("^(?:" + r + ")$") + re := regexp.MustCompile("^(?s:" + r + ")$") require.Equal(t, re.MatchString(v), m.MatchString(v)) }) } @@ -167,7 +167,7 @@ func TestOptimizeConcatRegex(t *testing.T) { } for _, c := range cases { - parsed, err := syntax.Parse(c.regex, syntax.Perl) + parsed, err := syntax.Parse(c.regex, syntax.Perl|syntax.DotNL) require.NoError(t, err) prefix, suffix, contains := optimizeConcatRegex(parsed) @@ -248,7 +248,7 @@ func TestFindSetMatches(t *testing.T) { c := c t.Run(c.pattern, 
func(t *testing.T) { t.Parallel() - parsed, err := syntax.Parse(c.pattern, syntax.Perl) + parsed, err := syntax.Parse(c.pattern, syntax.Perl|syntax.DotNL) require.NoError(t, err) matches, actualCaseSensitive := findSetMatches(parsed) require.Equal(t, c.expMatches, matches) @@ -348,15 +348,15 @@ func TestStringMatcherFromRegexp(t *testing.T) { pattern string exp StringMatcher }{ - {".*", anyStringWithoutNewlineMatcher{}}, - {".*?", anyStringWithoutNewlineMatcher{}}, + {".*", trueMatcher{}}, + {".*?", trueMatcher{}}, {"(?s:.*)", trueMatcher{}}, - {"(.*)", anyStringWithoutNewlineMatcher{}}, - {"^.*$", anyStringWithoutNewlineMatcher{}}, - {".+", &anyNonEmptyStringMatcher{matchNL: false}}, + {"(.*)", trueMatcher{}}, + {"^.*$", trueMatcher{}}, + {".+", &anyNonEmptyStringMatcher{matchNL: true}}, {"(?s:.+)", &anyNonEmptyStringMatcher{matchNL: true}}, - {"^.+$", &anyNonEmptyStringMatcher{matchNL: false}}, - {"(.+)", &anyNonEmptyStringMatcher{matchNL: false}}, + {"^.+$", &anyNonEmptyStringMatcher{matchNL: true}}, + {"(.+)", &anyNonEmptyStringMatcher{matchNL: true}}, {"", emptyStringMatcher{}}, {"^$", emptyStringMatcher{}}, {"^foo$", &equalStringMatcher{s: "foo", caseSensitive: true}}, @@ -366,23 +366,23 @@ func TestStringMatcherFromRegexp(t *testing.T) { {`(?i:((foo1|foo2|bar)))`, orStringMatcher([]StringMatcher{orStringMatcher([]StringMatcher{&equalStringMatcher{s: "FOO1", caseSensitive: false}, &equalStringMatcher{s: "FOO2", caseSensitive: false}}), &equalStringMatcher{s: "BAR", caseSensitive: false}})}, {"^((?i:foo|oo)|(bar))$", orStringMatcher([]StringMatcher{&equalStringMatcher{s: "FOO", caseSensitive: false}, &equalStringMatcher{s: "OO", caseSensitive: false}, &equalStringMatcher{s: "bar", caseSensitive: true}})}, {"(?i:(foo1|foo2|bar))", orStringMatcher([]StringMatcher{orStringMatcher([]StringMatcher{&equalStringMatcher{s: "FOO1", caseSensitive: false}, &equalStringMatcher{s: "FOO2", caseSensitive: false}}), &equalStringMatcher{s: "BAR", caseSensitive: false}})}, - 
{".*foo.*", &containsStringMatcher{substrings: []string{"foo"}, left: anyStringWithoutNewlineMatcher{}, right: anyStringWithoutNewlineMatcher{}}}, - {"(.*)foo.*", &containsStringMatcher{substrings: []string{"foo"}, left: anyStringWithoutNewlineMatcher{}, right: anyStringWithoutNewlineMatcher{}}}, - {"(.*)foo(.*)", &containsStringMatcher{substrings: []string{"foo"}, left: anyStringWithoutNewlineMatcher{}, right: anyStringWithoutNewlineMatcher{}}}, - {"(.+)foo(.*)", &containsStringMatcher{substrings: []string{"foo"}, left: &anyNonEmptyStringMatcher{matchNL: false}, right: anyStringWithoutNewlineMatcher{}}}, - {"^.+foo.+", &containsStringMatcher{substrings: []string{"foo"}, left: &anyNonEmptyStringMatcher{matchNL: false}, right: &anyNonEmptyStringMatcher{matchNL: false}}}, - {"^(.*)(foo)(.*)$", &containsStringMatcher{substrings: []string{"foo"}, left: anyStringWithoutNewlineMatcher{}, right: anyStringWithoutNewlineMatcher{}}}, - {"^(.*)(foo|foobar)(.*)$", &containsStringMatcher{substrings: []string{"foo", "foobar"}, left: anyStringWithoutNewlineMatcher{}, right: anyStringWithoutNewlineMatcher{}}}, - {"^(.*)(foo|foobar)(.+)$", &containsStringMatcher{substrings: []string{"foo", "foobar"}, left: anyStringWithoutNewlineMatcher{}, right: &anyNonEmptyStringMatcher{matchNL: false}}}, - {"^(.*)(bar|b|buzz)(.+)$", &containsStringMatcher{substrings: []string{"bar", "b", "buzz"}, left: anyStringWithoutNewlineMatcher{}, right: &anyNonEmptyStringMatcher{matchNL: false}}}, + {".*foo.*", &containsStringMatcher{substrings: []string{"foo"}, left: trueMatcher{}, right: trueMatcher{}}}, + {"(.*)foo.*", &containsStringMatcher{substrings: []string{"foo"}, left: trueMatcher{}, right: trueMatcher{}}}, + {"(.*)foo(.*)", &containsStringMatcher{substrings: []string{"foo"}, left: trueMatcher{}, right: trueMatcher{}}}, + {"(.+)foo(.*)", &containsStringMatcher{substrings: []string{"foo"}, left: &anyNonEmptyStringMatcher{matchNL: true}, right: trueMatcher{}}}, + {"^.+foo.+", 
&containsStringMatcher{substrings: []string{"foo"}, left: &anyNonEmptyStringMatcher{matchNL: true}, right: &anyNonEmptyStringMatcher{matchNL: true}}}, + {"^(.*)(foo)(.*)$", &containsStringMatcher{substrings: []string{"foo"}, left: trueMatcher{}, right: trueMatcher{}}}, + {"^(.*)(foo|foobar)(.*)$", &containsStringMatcher{substrings: []string{"foo", "foobar"}, left: trueMatcher{}, right: trueMatcher{}}}, + {"^(.*)(foo|foobar)(.+)$", &containsStringMatcher{substrings: []string{"foo", "foobar"}, left: trueMatcher{}, right: &anyNonEmptyStringMatcher{matchNL: true}}}, + {"^(.*)(bar|b|buzz)(.+)$", &containsStringMatcher{substrings: []string{"bar", "b", "buzz"}, left: trueMatcher{}, right: &anyNonEmptyStringMatcher{matchNL: true}}}, {"10\\.0\\.(1|2)\\.+", nil}, - {"10\\.0\\.(1|2).+", &containsStringMatcher{substrings: []string{"10.0.1", "10.0.2"}, left: nil, right: &anyNonEmptyStringMatcher{matchNL: false}}}, - {"^.+foo", &literalSuffixStringMatcher{left: &anyNonEmptyStringMatcher{}, suffix: "foo", suffixCaseSensitive: true}}, - {"foo-.*$", &literalPrefixSensitiveStringMatcher{prefix: "foo-", right: anyStringWithoutNewlineMatcher{}}}, - {"(prometheus|api_prom)_api_v1_.+", &containsStringMatcher{substrings: []string{"prometheus_api_v1_", "api_prom_api_v1_"}, left: nil, right: &anyNonEmptyStringMatcher{matchNL: false}}}, - {"^((.*)(bar|b|buzz)(.+)|foo)$", orStringMatcher([]StringMatcher{&containsStringMatcher{substrings: []string{"bar", "b", "buzz"}, left: anyStringWithoutNewlineMatcher{}, right: &anyNonEmptyStringMatcher{matchNL: false}}, &equalStringMatcher{s: "foo", caseSensitive: true}})}, - {"((fo(bar))|.+foo)", orStringMatcher([]StringMatcher{orStringMatcher([]StringMatcher{&equalStringMatcher{s: "fobar", caseSensitive: true}}), &literalSuffixStringMatcher{suffix: "foo", suffixCaseSensitive: true, left: &anyNonEmptyStringMatcher{matchNL: false}}})}, - {"(.+)/(gateway|cortex-gw|cortex-gw-internal)", &containsStringMatcher{substrings: []string{"/gateway", "/cortex-gw", 
"/cortex-gw-internal"}, left: &anyNonEmptyStringMatcher{matchNL: false}, right: nil}}, + {"10\\.0\\.(1|2).+", &containsStringMatcher{substrings: []string{"10.0.1", "10.0.2"}, left: nil, right: &anyNonEmptyStringMatcher{matchNL: true}}}, + {"^.+foo", &literalSuffixStringMatcher{left: &anyNonEmptyStringMatcher{matchNL: true}, suffix: "foo", suffixCaseSensitive: true}}, + {"foo-.*$", &literalPrefixSensitiveStringMatcher{prefix: "foo-", right: trueMatcher{}}}, + {"(prometheus|api_prom)_api_v1_.+", &containsStringMatcher{substrings: []string{"prometheus_api_v1_", "api_prom_api_v1_"}, left: nil, right: &anyNonEmptyStringMatcher{matchNL: true}}}, + {"^((.*)(bar|b|buzz)(.+)|foo)$", orStringMatcher([]StringMatcher{&containsStringMatcher{substrings: []string{"bar", "b", "buzz"}, left: trueMatcher{}, right: &anyNonEmptyStringMatcher{matchNL: true}}, &equalStringMatcher{s: "foo", caseSensitive: true}})}, + {"((fo(bar))|.+foo)", orStringMatcher([]StringMatcher{orStringMatcher([]StringMatcher{&equalStringMatcher{s: "fobar", caseSensitive: true}}), &literalSuffixStringMatcher{suffix: "foo", suffixCaseSensitive: true, left: &anyNonEmptyStringMatcher{matchNL: true}}})}, + {"(.+)/(gateway|cortex-gw|cortex-gw-internal)", &containsStringMatcher{substrings: []string{"/gateway", "/cortex-gw", "/cortex-gw-internal"}, left: &anyNonEmptyStringMatcher{matchNL: true}, right: nil}}, // we don't support case insensitive matching for contains. // This is because there's no strings.IndexOfFold function. // We can revisit later if this is really popular by using strings.ToUpper. 
@@ -393,15 +393,15 @@ func TestStringMatcherFromRegexp(t *testing.T) { {".*foo.*bar.*", nil}, {`\d*`, nil}, {".", nil}, - {"/|/bar.*", &literalPrefixSensitiveStringMatcher{prefix: "/", right: orStringMatcher{emptyStringMatcher{}, &literalPrefixSensitiveStringMatcher{prefix: "bar", right: anyStringWithoutNewlineMatcher{}}}}}, + {"/|/bar.*", &literalPrefixSensitiveStringMatcher{prefix: "/", right: orStringMatcher{emptyStringMatcher{}, &literalPrefixSensitiveStringMatcher{prefix: "bar", right: trueMatcher{}}}}}, // This one is not supported because `stringMatcherFromRegexp` is not reentrant for syntax.OpConcat. // It would make the code too complex to handle it. {"(.+)/(foo.*|bar$)", nil}, // Case sensitive alternate with same literal prefix and .* suffix. - {"(xyz-016a-ixb-dp.*|xyz-016a-ixb-op.*)", &literalPrefixSensitiveStringMatcher{prefix: "xyz-016a-ixb-", right: orStringMatcher{&literalPrefixSensitiveStringMatcher{prefix: "dp", right: anyStringWithoutNewlineMatcher{}}, &literalPrefixSensitiveStringMatcher{prefix: "op", right: anyStringWithoutNewlineMatcher{}}}}}, + {"(xyz-016a-ixb-dp.*|xyz-016a-ixb-op.*)", &literalPrefixSensitiveStringMatcher{prefix: "xyz-016a-ixb-", right: orStringMatcher{&literalPrefixSensitiveStringMatcher{prefix: "dp", right: trueMatcher{}}, &literalPrefixSensitiveStringMatcher{prefix: "op", right: trueMatcher{}}}}}, // Case insensitive alternate with same literal prefix and .* suffix. 
- {"(?i:(xyz-016a-ixb-dp.*|xyz-016a-ixb-op.*))", &literalPrefixInsensitiveStringMatcher{prefix: "XYZ-016A-IXB-", right: orStringMatcher{&literalPrefixInsensitiveStringMatcher{prefix: "DP", right: anyStringWithoutNewlineMatcher{}}, &literalPrefixInsensitiveStringMatcher{prefix: "OP", right: anyStringWithoutNewlineMatcher{}}}}}, - {"(?i)(xyz-016a-ixb-dp.*|xyz-016a-ixb-op.*)", &literalPrefixInsensitiveStringMatcher{prefix: "XYZ-016A-IXB-", right: orStringMatcher{&literalPrefixInsensitiveStringMatcher{prefix: "DP", right: anyStringWithoutNewlineMatcher{}}, &literalPrefixInsensitiveStringMatcher{prefix: "OP", right: anyStringWithoutNewlineMatcher{}}}}}, + {"(?i:(xyz-016a-ixb-dp.*|xyz-016a-ixb-op.*))", &literalPrefixInsensitiveStringMatcher{prefix: "XYZ-016A-IXB-", right: orStringMatcher{&literalPrefixInsensitiveStringMatcher{prefix: "DP", right: trueMatcher{}}, &literalPrefixInsensitiveStringMatcher{prefix: "OP", right: trueMatcher{}}}}}, + {"(?i)(xyz-016a-ixb-dp.*|xyz-016a-ixb-op.*)", &literalPrefixInsensitiveStringMatcher{prefix: "XYZ-016A-IXB-", right: orStringMatcher{&literalPrefixInsensitiveStringMatcher{prefix: "DP", right: trueMatcher{}}, &literalPrefixInsensitiveStringMatcher{prefix: "OP", right: trueMatcher{}}}}}, // Concatenated variable length selectors are not supported. {"foo.*.*", nil}, {"foo.+.+", nil}, @@ -410,15 +410,15 @@ func TestStringMatcherFromRegexp(t *testing.T) { {"aaa.?.?", nil}, {"aaa.?.*", nil}, // Regexps with ".?". 
- {"ext.?|xfs", orStringMatcher{&literalPrefixSensitiveStringMatcher{prefix: "ext", right: &zeroOrOneCharacterStringMatcher{matchNL: false}}, &equalStringMatcher{s: "xfs", caseSensitive: true}}}, + {"ext.?|xfs", orStringMatcher{&literalPrefixSensitiveStringMatcher{prefix: "ext", right: &zeroOrOneCharacterStringMatcher{matchNL: true}}, &equalStringMatcher{s: "xfs", caseSensitive: true}}}, {"(?s)(ext.?|xfs)", orStringMatcher{&literalPrefixSensitiveStringMatcher{prefix: "ext", right: &zeroOrOneCharacterStringMatcher{matchNL: true}}, &equalStringMatcher{s: "xfs", caseSensitive: true}}}, - {"foo.?", &literalPrefixSensitiveStringMatcher{prefix: "foo", right: &zeroOrOneCharacterStringMatcher{matchNL: false}}}, + {"foo.?", &literalPrefixSensitiveStringMatcher{prefix: "foo", right: &zeroOrOneCharacterStringMatcher{matchNL: true}}}, {"f.?o", nil}, } { c := c t.Run(c.pattern, func(t *testing.T) { t.Parallel() - parsed, err := syntax.Parse(c.pattern, syntax.Perl) + parsed, err := syntax.Parse(c.pattern, syntax.Perl|syntax.DotNL) require.NoError(t, err) matches := stringMatcherFromRegexp(parsed) require.Equal(t, c.exp, matches) @@ -437,16 +437,16 @@ func TestStringMatcherFromRegexp_LiteralPrefix(t *testing.T) { { pattern: "(xyz-016a-ixb-dp.*|xyz-016a-ixb-op.*)", expectedLiteralPrefixMatchers: 3, - expectedMatches: []string{"xyz-016a-ixb-dp", "xyz-016a-ixb-dpXXX", "xyz-016a-ixb-op", "xyz-016a-ixb-opXXX"}, - expectedNotMatches: []string{"XYZ-016a-ixb-dp", "xyz-016a-ixb-d", "XYZ-016a-ixb-op", "xyz-016a-ixb-o", "xyz", "dp", "xyz-016a-ixb-dp\n"}, + expectedMatches: []string{"xyz-016a-ixb-dp", "xyz-016a-ixb-dpXXX", "xyz-016a-ixb-op", "xyz-016a-ixb-opXXX", "xyz-016a-ixb-dp\n"}, + expectedNotMatches: []string{"XYZ-016a-ixb-dp", "xyz-016a-ixb-d", "XYZ-016a-ixb-op", "xyz-016a-ixb-o", "xyz", "dp"}, }, // Case insensitive. 
{ pattern: "(?i)(xyz-016a-ixb-dp.*|xyz-016a-ixb-op.*)", expectedLiteralPrefixMatchers: 3, - expectedMatches: []string{"xyz-016a-ixb-dp", "XYZ-016a-ixb-dpXXX", "xyz-016a-ixb-op", "XYZ-016a-ixb-opXXX"}, - expectedNotMatches: []string{"xyz-016a-ixb-d", "xyz", "dp", "xyz-016a-ixb-dp\n"}, + expectedMatches: []string{"xyz-016a-ixb-dp", "XYZ-016a-ixb-dpXXX", "xyz-016a-ixb-op", "XYZ-016a-ixb-opXXX", "xyz-016a-ixb-dp\n"}, + expectedNotMatches: []string{"xyz-016a-ixb-d", "xyz", "dp"}, }, // Nested literal prefixes, case sensitive. @@ -474,13 +474,13 @@ func TestStringMatcherFromRegexp_LiteralPrefix(t *testing.T) { }, } { t.Run(c.pattern, func(t *testing.T) { - parsed, err := syntax.Parse(c.pattern, syntax.Perl) + parsed, err := syntax.Parse(c.pattern, syntax.Perl|syntax.DotNL) require.NoError(t, err) matcher := stringMatcherFromRegexp(parsed) require.NotNil(t, matcher) - re := regexp.MustCompile("^" + c.pattern + "$") + re := regexp.MustCompile("^(?s:" + c.pattern + ")$") // Pre-condition check: ensure it contains literalPrefixSensitiveStringMatcher or literalPrefixInsensitiveStringMatcher. numPrefixMatchers := 0 @@ -523,16 +523,16 @@ func TestStringMatcherFromRegexp_LiteralSuffix(t *testing.T) { { pattern: "(.*xyz-016a-ixb-dp|.*xyz-016a-ixb-op)", expectedLiteralSuffixMatchers: 2, - expectedMatches: []string{"xyz-016a-ixb-dp", "XXXxyz-016a-ixb-dp", "xyz-016a-ixb-op", "XXXxyz-016a-ixb-op"}, - expectedNotMatches: []string{"XYZ-016a-ixb-dp", "yz-016a-ixb-dp", "XYZ-016a-ixb-op", "xyz-016a-ixb-o", "xyz", "dp", "\nxyz-016a-ixb-dp"}, + expectedMatches: []string{"xyz-016a-ixb-dp", "XXXxyz-016a-ixb-dp", "xyz-016a-ixb-op", "XXXxyz-016a-ixb-op", "\nxyz-016a-ixb-dp"}, + expectedNotMatches: []string{"XYZ-016a-ixb-dp", "yz-016a-ixb-dp", "XYZ-016a-ixb-op", "xyz-016a-ixb-o", "xyz", "dp"}, }, // Case insensitive. 
{ pattern: "(?i)(.*xyz-016a-ixb-dp|.*xyz-016a-ixb-op)", expectedLiteralSuffixMatchers: 2, - expectedMatches: []string{"xyz-016a-ixb-dp", "XYZ-016a-ixb-dp", "XXXxyz-016a-ixb-dp", "XyZ-016a-ixb-op", "XXXxyz-016a-ixb-op"}, - expectedNotMatches: []string{"yz-016a-ixb-dp", "xyz-016a-ixb-o", "xyz", "dp", "\nxyz-016a-ixb-dp"}, + expectedMatches: []string{"xyz-016a-ixb-dp", "XYZ-016a-ixb-dp", "XXXxyz-016a-ixb-dp", "XyZ-016a-ixb-op", "XXXxyz-016a-ixb-op", "\nxyz-016a-ixb-dp"}, + expectedNotMatches: []string{"yz-016a-ixb-dp", "xyz-016a-ixb-o", "xyz", "dp"}, }, // Nested literal suffixes, case sensitive. @@ -552,13 +552,13 @@ func TestStringMatcherFromRegexp_LiteralSuffix(t *testing.T) { }, } { t.Run(c.pattern, func(t *testing.T) { - parsed, err := syntax.Parse(c.pattern, syntax.Perl) + parsed, err := syntax.Parse(c.pattern, syntax.Perl|syntax.DotNL) require.NoError(t, err) matcher := stringMatcherFromRegexp(parsed) require.NotNil(t, matcher) - re := regexp.MustCompile("^" + c.pattern + "$") + re := regexp.MustCompile("^(?s:" + c.pattern + ")$") // Pre-condition check: ensure it contains literalSuffixStringMatcher. 
numSuffixMatchers := 0 @@ -598,26 +598,26 @@ func TestStringMatcherFromRegexp_Quest(t *testing.T) { { pattern: "test.?", expectedZeroOrOneMatchers: 1, - expectedMatches: []string{"test", "test!"}, - expectedNotMatches: []string{"test\n", "tes", "test!!"}, + expectedMatches: []string{"test\n", "test", "test!"}, + expectedNotMatches: []string{"tes", "test!!"}, }, { pattern: ".?test", expectedZeroOrOneMatchers: 1, - expectedMatches: []string{"test", "!test"}, - expectedNotMatches: []string{"\ntest", "tes", "test!"}, + expectedMatches: []string{"\ntest", "test", "!test"}, + expectedNotMatches: []string{"tes", "test!"}, }, { pattern: "(aaa.?|bbb.?)", expectedZeroOrOneMatchers: 2, - expectedMatches: []string{"aaa", "aaaX", "bbb", "bbbX"}, - expectedNotMatches: []string{"aa", "aaaXX", "aaa\n", "bb", "bbbXX", "bbb\n"}, + expectedMatches: []string{"aaa", "aaaX", "bbb", "bbbX", "aaa\n", "bbb\n"}, + expectedNotMatches: []string{"aa", "aaaXX", "bb", "bbbXX"}, }, { pattern: ".*aaa.?", expectedZeroOrOneMatchers: 1, - expectedMatches: []string{"aaa", "Xaaa", "aaaX", "XXXaaa", "XXXaaaX"}, - expectedNotMatches: []string{"aa", "aaaXX", "XXXaaaXXX", "XXXaaa\n"}, + expectedMatches: []string{"aaa", "Xaaa", "aaaX", "XXXaaa", "XXXaaaX", "XXXaaa\n"}, + expectedNotMatches: []string{"aa", "aaaXX", "XXXaaaXXX"}, }, // Match newline. 
@@ -632,18 +632,18 @@ func TestStringMatcherFromRegexp_Quest(t *testing.T) { { pattern: "(aaa.?|((?s).?bbb.+))", expectedZeroOrOneMatchers: 2, - expectedMatches: []string{"aaa", "aaaX", "bbbX", "XbbbX", "bbbXXX", "\nbbbX"}, - expectedNotMatches: []string{"aa", "aaa\n", "Xbbb", "\nbbb"}, + expectedMatches: []string{"aaa", "aaaX", "bbbX", "XbbbX", "bbbXXX", "\nbbbX", "aaa\n"}, + expectedNotMatches: []string{"aa", "Xbbb", "\nbbb"}, }, } { t.Run(c.pattern, func(t *testing.T) { - parsed, err := syntax.Parse(c.pattern, syntax.Perl) + parsed, err := syntax.Parse(c.pattern, syntax.Perl|syntax.DotNL) require.NoError(t, err) matcher := stringMatcherFromRegexp(parsed) require.NotNil(t, matcher) - re := regexp.MustCompile("^" + c.pattern + "$") + re := regexp.MustCompile("^(?s:" + c.pattern + ")$") // Pre-condition check: ensure it contains zeroOrOneCharacterStringMatcher. numZeroOrOneMatchers := 0 @@ -1112,7 +1112,7 @@ func BenchmarkOptimizeEqualOrPrefixStringMatchers(b *testing.B) { } b.Logf("regexp: %s", re) - parsed, err := syntax.Parse(re, syntax.Perl) + parsed, err := syntax.Parse(re, syntax.Perl|syntax.DotNL) require.NoError(b, err) unoptimized := stringMatcherFromRegexpInternal(parsed) diff --git a/model/relabel/relabel.go b/model/relabel/relabel.go index a880465969..8cc367bc8d 100644 --- a/model/relabel/relabel.go +++ b/model/relabel/relabel.go @@ -171,7 +171,7 @@ type Regexp struct { // NewRegexp creates a new anchored Regexp and returns an error if the // passed-in regular expression does not compile. func NewRegexp(s string) (Regexp, error) { - regex, err := regexp.Compile("^(?:" + s + ")$") + regex, err := regexp.Compile("^(?s:" + s + ")$") return Regexp{Regexp: regex}, err } @@ -218,8 +218,8 @@ func (re Regexp) String() string { } str := re.Regexp.String() - // Trim the anchor `^(?:` prefix and `)$` suffix. - return str[4 : len(str)-2] + // Trim the anchor `^(?s:` prefix and `)$` suffix. 
+ return str[5 : len(str)-2] } // Process returns a relabeled version of the given label set. The relabel configurations diff --git a/model/relabel/relabel_test.go b/model/relabel/relabel_test.go index fc9952134d..d6e5cb43ea 100644 --- a/model/relabel/relabel_test.go +++ b/model/relabel/relabel_test.go @@ -569,6 +569,29 @@ func TestRelabel(t *testing.T) { }, drop: true, }, + { + input: labels.FromMap(map[string]string{ + "a": "line1\nline2", + "b": "bar", + "c": "baz", + }), + relabel: []*Config{ + { + SourceLabels: model.LabelNames{"a"}, + Regex: MustNewRegexp("line1.*line2"), + TargetLabel: "d", + Separator: ";", + Replacement: "match${1}", + Action: Replace, + }, + }, + output: labels.FromMap(map[string]string{ + "a": "line1\nline2", + "b": "bar", + "c": "baz", + "d": "match", + }), + }, } for _, test := range tests { diff --git a/promql/functions.go b/promql/functions.go index 182b69b080..5bdcce65df 100644 --- a/promql/functions.go +++ b/promql/functions.go @@ -1480,7 +1480,7 @@ func (ev *evaluator) evalLabelReplace(ctx context.Context, args parser.Expressio regexStr = stringFromArg(args[4]) ) - regex, err := regexp.Compile("^(?:" + regexStr + ")$") + regex, err := regexp.Compile("^(?s:" + regexStr + ")$") if err != nil { panic(fmt.Errorf("invalid regular expression in label_replace(): %s", regexStr)) } diff --git a/tsdb/querier.go b/tsdb/querier.go index 912c950329..1083cbba0e 100644 --- a/tsdb/querier.go +++ b/tsdb/querier.go @@ -254,6 +254,10 @@ func PostingsForMatchers(ctx context.Context, ix IndexReader, ms ...*labels.Matc return nil, err } its = append(its, allPostings) + case m.Type == labels.MatchRegexp && m.Value == ".*": + // .* regexp matches any string: do nothing. + case m.Type == labels.MatchNotRegexp && m.Value == ".*": + return index.EmptyPostings(), nil case labelMustBeSet[m.Name]: // If this matcher must be non-empty, we can be smarter. 
matchesEmpty := m.Matches("") diff --git a/tsdb/querier_test.go b/tsdb/querier_test.go index 0821b2b370..9ec807f803 100644 --- a/tsdb/querier_test.go +++ b/tsdb/querier_test.go @@ -2689,6 +2689,7 @@ func TestPostingsForMatchers(t *testing.T) { app.Append(0, labels.FromStrings("n", "1"), 0, 0) app.Append(0, labels.FromStrings("n", "1", "i", "a"), 0, 0) app.Append(0, labels.FromStrings("n", "1", "i", "b"), 0, 0) + app.Append(0, labels.FromStrings("n", "1", "i", "\n"), 0, 0) app.Append(0, labels.FromStrings("n", "2"), 0, 0) app.Append(0, labels.FromStrings("n", "2.5"), 0, 0) require.NoError(t, app.Commit()) @@ -2704,6 +2705,7 @@ func TestPostingsForMatchers(t *testing.T) { labels.FromStrings("n", "1"), labels.FromStrings("n", "1", "i", "a"), labels.FromStrings("n", "1", "i", "b"), + labels.FromStrings("n", "1", "i", "\n"), }, }, { @@ -2722,6 +2724,7 @@ func TestPostingsForMatchers(t *testing.T) { labels.FromStrings("n", "1"), labels.FromStrings("n", "1", "i", "a"), labels.FromStrings("n", "1", "i", "b"), + labels.FromStrings("n", "1", "i", "\n"), labels.FromStrings("n", "2"), labels.FromStrings("n", "2.5"), }, @@ -2739,6 +2742,7 @@ func TestPostingsForMatchers(t *testing.T) { exp: []labels.Labels{ labels.FromStrings("n", "1", "i", "a"), labels.FromStrings("n", "1", "i", "b"), + labels.FromStrings("n", "1", "i", "\n"), }, }, { @@ -2750,6 +2754,7 @@ func TestPostingsForMatchers(t *testing.T) { exp: []labels.Labels{ labels.FromStrings("n", "1"), labels.FromStrings("n", "1", "i", "b"), + labels.FromStrings("n", "1", "i", "\n"), }, }, { @@ -2757,6 +2762,7 @@ func TestPostingsForMatchers(t *testing.T) { exp: []labels.Labels{ labels.FromStrings("n", "1", "i", "a"), labels.FromStrings("n", "1", "i", "b"), + labels.FromStrings("n", "1", "i", "\n"), }, }, // Regex. 
@@ -2766,6 +2772,7 @@ func TestPostingsForMatchers(t *testing.T) { labels.FromStrings("n", "1"), labels.FromStrings("n", "1", "i", "a"), labels.FromStrings("n", "1", "i", "b"), + labels.FromStrings("n", "1", "i", "\n"), }, }, { @@ -2801,6 +2808,7 @@ func TestPostingsForMatchers(t *testing.T) { labels.FromStrings("n", "1"), labels.FromStrings("n", "1", "i", "a"), labels.FromStrings("n", "1", "i", "b"), + labels.FromStrings("n", "1", "i", "\n"), }, }, { @@ -2808,6 +2816,7 @@ func TestPostingsForMatchers(t *testing.T) { exp: []labels.Labels{ labels.FromStrings("n", "1", "i", "a"), labels.FromStrings("n", "1", "i", "b"), + labels.FromStrings("n", "1", "i", "\n"), }, }, // Not regex. @@ -2816,6 +2825,7 @@ func TestPostingsForMatchers(t *testing.T) { exp: []labels.Labels{ labels.FromStrings("n", "1", "i", "a"), labels.FromStrings("n", "1", "i", "b"), + labels.FromStrings("n", "1", "i", "\n"), }, }, { @@ -2849,12 +2859,14 @@ func TestPostingsForMatchers(t *testing.T) { exp: []labels.Labels{ labels.FromStrings("n", "1"), labels.FromStrings("n", "1", "i", "b"), + labels.FromStrings("n", "1", "i", "\n"), }, }, { matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "n", "1"), labels.MustNewMatcher(labels.MatchNotRegexp, "i", "^a?$")}, exp: []labels.Labels{ labels.FromStrings("n", "1", "i", "b"), + labels.FromStrings("n", "1", "i", "\n"), }, }, { @@ -2862,6 +2874,7 @@ func TestPostingsForMatchers(t *testing.T) { exp: []labels.Labels{ labels.FromStrings("n", "1", "i", "a"), labels.FromStrings("n", "1", "i", "b"), + labels.FromStrings("n", "1", "i", "\n"), }, }, { @@ -2895,6 +2908,7 @@ func TestPostingsForMatchers(t *testing.T) { labels.FromStrings("n", "1"), labels.FromStrings("n", "1", "i", "a"), labels.FromStrings("n", "1", "i", "b"), + labels.FromStrings("n", "1", "i", "\n"), labels.FromStrings("n", "2"), }, }, @@ -2942,6 +2956,57 @@ func TestPostingsForMatchers(t *testing.T) { labels.FromStrings("n", "2.5"), }, }, + // Test shortcut for i=~".*" + { + 
matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchRegexp, "i", ".*")}, + exp: []labels.Labels{ + labels.FromStrings("n", "1"), + labels.FromStrings("n", "1", "i", "a"), + labels.FromStrings("n", "1", "i", "b"), + labels.FromStrings("n", "1", "i", "\n"), + labels.FromStrings("n", "2"), + labels.FromStrings("n", "2.5"), + }, + }, + // Test shortcut for n=~".*" and i=~"^.*$" + { + matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchRegexp, "n", ".*"), labels.MustNewMatcher(labels.MatchRegexp, "i", "^.*$")}, + exp: []labels.Labels{ + labels.FromStrings("n", "1"), + labels.FromStrings("n", "1", "i", "a"), + labels.FromStrings("n", "1", "i", "b"), + labels.FromStrings("n", "1", "i", "\n"), + labels.FromStrings("n", "2"), + labels.FromStrings("n", "2.5"), + }, + }, + // Test shortcut for n=~"^.*$" + { + matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchRegexp, "n", "^.*$"), labels.MustNewMatcher(labels.MatchEqual, "i", "a")}, + exp: []labels.Labels{ + labels.FromStrings("n", "1", "i", "a"), + }, + }, + // Test shortcut for i!~".*" + { + matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchNotRegexp, "i", ".*")}, + exp: []labels.Labels{}, + }, + // Test shortcut for n!~"^.*$", i!~".*". First one triggers empty result. 
+ { + matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchNotRegexp, "n", "^.*$"), labels.MustNewMatcher(labels.MatchNotRegexp, "i", ".*")}, + exp: []labels.Labels{}, + }, + // Test shortcut i!~".*" + { + matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchRegexp, "n", ".*"), labels.MustNewMatcher(labels.MatchNotRegexp, "i", ".*")}, + exp: []labels.Labels{}, + }, + // Test shortcut i!~"^.*$" + { + matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "n", "1"), labels.MustNewMatcher(labels.MatchNotRegexp, "i", "^.*$")}, + exp: []labels.Labels{}, + }, } ir, err := h.Index() From ac5377873fe5067f9d42bc0e1470ac425bf549b8 Mon Sep 17 00:00:00 2001 From: Julien Date: Mon, 16 Sep 2024 17:17:50 +0200 Subject: [PATCH 011/137] mantine UI: Distinguish between Not Ready and Stopping Signed-off-by: Julien --- cmd/prometheus/main.go | 4 +- .../src/components/ReadinessWrapper.tsx | 51 ++++++++++++++----- web/web.go | 33 +++++++++--- web/web_test.go | 14 ++--- 4 files changed, 71 insertions(+), 31 deletions(-) diff --git a/cmd/prometheus/main.go b/cmd/prometheus/main.go index 7bd51054e3..1081dce285 100644 --- a/cmd/prometheus/main.go +++ b/cmd/prometheus/main.go @@ -980,7 +980,7 @@ func main() { }, func(err error) { close(cancel) - webHandler.SetReady(false) + webHandler.SetReady(web.Stopping) }, ) } @@ -1159,7 +1159,7 @@ func main() { reloadReady.Close() - webHandler.SetReady(true) + webHandler.SetReady(web.Ready) level.Info(logger).Log("msg", "Server is ready to receive web requests.") <-cancel return nil diff --git a/web/ui/mantine-ui/src/components/ReadinessWrapper.tsx b/web/ui/mantine-ui/src/components/ReadinessWrapper.tsx index 52ae0485cb..dbfcba5550 100644 --- a/web/ui/mantine-ui/src/components/ReadinessWrapper.tsx +++ b/web/ui/mantine-ui/src/components/ReadinessWrapper.tsx @@ -1,17 +1,23 @@ import { FC, PropsWithChildren, useEffect, useState } from "react"; +import { IconAlertTriangle } from "@tabler/icons-react"; import { useAppDispatch 
} from "../state/hooks"; import { updateSettings, useSettings } from "../state/settingsSlice"; import { useSuspenseAPIQuery } from "../api/api"; import { WALReplayStatus } from "../api/responseTypes/walreplay"; -import { Progress, Stack, Title } from "@mantine/core"; +import { Progress, Alert } from "@mantine/core"; import { useSuspenseQuery } from "@tanstack/react-query"; +const STATUS_STARTING = "is starting up..."; +const STATUS_STOPPING = "is shutting down..."; +const STATUS_LOADING = "is not ready..."; + const ReadinessLoader: FC = () => { - const { pathPrefix } = useSettings(); + const { pathPrefix, agentMode } = useSettings(); const dispatch = useAppDispatch(); // Query key is incremented every second to retrigger the status fetching. const [queryKey, setQueryKey] = useState(0); + const [statusMessage, setStatusMessage] = useState(""); // Query readiness status. const { data: ready } = useSuspenseQuery({ @@ -28,8 +34,16 @@ const ReadinessLoader: FC = () => { }); switch (res.status) { case 200: + setStatusMessage(""); // Clear any status message when ready. return true; case 503: + // Check the custom header `X-Prometheus-Stopping` for stopping information. + if (res.headers.get("X-Prometheus-Stopping") === "true") { + setStatusMessage(STATUS_STOPPING); + } else { + setStatusMessage(STATUS_STARTING); + } + return false; default: throw new Error(res.statusText); @@ -40,14 +54,16 @@ const ReadinessLoader: FC = () => { }, }); - // Query WAL replay status. + // Only call WAL replay status API if the service is starting up. + const shouldQueryWALReplay = statusMessage === STATUS_STARTING; + const { - data: { - data: { min, max, current }, - }, + data: walData, + isSuccess: walSuccess, } = useSuspenseAPIQuery({ path: "/status/walreplay", key: ["walreplay", queryKey], + enabled: shouldQueryWALReplay, // Only enabled when service is starting up. }); useEffect(() => { @@ -62,21 +78,28 @@ const ReadinessLoader: FC = () => { }, []); return ( - - Starting up... 
- {max > 0 && ( + } + maw={500} + mx="auto" + mt="lg" + > + {shouldQueryWALReplay && walSuccess && walData && ( <> -

- Replaying WAL ({current}/{max}) -

+ + Replaying WAL ({walData.data.current}/{walData.data.max}) + )} -
+ ); }; diff --git a/web/web.go b/web/web.go index b4d285108c..6b0d9cd187 100644 --- a/web/web.go +++ b/web/web.go @@ -102,6 +102,14 @@ var newUIReactRouterServerPaths = []string{ "/tsdb-status", } +type ReadyStatus uint32 + +const ( + NotReady ReadyStatus = iota + Ready + Stopping +) + // withStackTrace logs the stack trace in case the request panics. The function // will re-raise the error which will then be handled by the net/http package. // It is needed because the go-kit log package doesn't manage properly the @@ -331,7 +339,7 @@ func New(logger log.Logger, o *Options) *Handler { now: model.Now, } - h.SetReady(false) + h.SetReady(NotReady) factorySPr := func(_ context.Context) api_v1.ScrapePoolsRetriever { return h.scrapeManager } factoryTr := func(_ context.Context) api_v1.TargetRetriever { return h.scrapeManager } @@ -572,30 +580,39 @@ func serveDebug(w http.ResponseWriter, req *http.Request) { } // SetReady sets the ready status of our web Handler. -func (h *Handler) SetReady(v bool) { - if v { - h.ready.Store(1) +func (h *Handler) SetReady(v ReadyStatus) { + if v == Ready { + h.ready.Store(uint32(Ready)) h.metrics.readyStatus.Set(1) return } - h.ready.Store(0) + h.ready.Store(uint32(v)) h.metrics.readyStatus.Set(0) } // Verifies whether the server is ready or not. func (h *Handler) isReady() bool { - return h.ready.Load() > 0 + return ReadyStatus(h.ready.Load()) == Ready } // Checks if server is ready, calls f if it is, returns 503 if it is not. 
func (h *Handler) testReady(f http.HandlerFunc) http.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) { - if h.isReady() { + switch ReadyStatus(h.ready.Load()) { + case Ready: f(w, r) - } else { + case NotReady: + w.WriteHeader(http.StatusServiceUnavailable) + w.Header().Set("X-Prometheus-Stopping", "false") + fmt.Fprintf(w, "Service Unavailable") + case Stopping: + w.Header().Set("X-Prometheus-Stopping", "true") w.WriteHeader(http.StatusServiceUnavailable) fmt.Fprintf(w, "Service Unavailable") + default: + w.WriteHeader(http.StatusInternalServerError) + fmt.Fprintf(w, "Unknown state") } } } diff --git a/web/web_test.go b/web/web_test.go index b660746b13..696ba80d1d 100644 --- a/web/web_test.go +++ b/web/web_test.go @@ -156,7 +156,7 @@ func TestReadyAndHealthy(t *testing.T) { cleanupTestResponse(t, resp) // Set to ready. - webHandler.SetReady(true) + webHandler.SetReady(Ready) for _, u := range []string{ baseURL + "/-/healthy", @@ -260,7 +260,7 @@ func TestRoutePrefix(t *testing.T) { cleanupTestResponse(t, resp) // Set to ready. 
- webHandler.SetReady(true) + webHandler.SetReady(Ready) resp, err = http.Get(baseURL + opts.RoutePrefix + "/-/healthy") require.NoError(t, err) @@ -307,7 +307,7 @@ func TestDebugHandler(t *testing.T) { }, } handler := New(nil, opts) - handler.SetReady(true) + handler.SetReady(Ready) w := httptest.NewRecorder() @@ -349,7 +349,7 @@ func TestHTTPMetrics(t *testing.T) { counter := handler.metrics.requestCounter require.Equal(t, 1, int(prom_testutil.ToFloat64(counter.WithLabelValues("/-/ready", strconv.Itoa(http.StatusServiceUnavailable))))) - handler.SetReady(true) + handler.SetReady(Ready) for range [2]int{} { code = getReady() require.Equal(t, http.StatusOK, code) @@ -358,7 +358,7 @@ func TestHTTPMetrics(t *testing.T) { require.Equal(t, 2, int(prom_testutil.ToFloat64(counter.WithLabelValues("/-/ready", strconv.Itoa(http.StatusOK))))) require.Equal(t, 1, int(prom_testutil.ToFloat64(counter.WithLabelValues("/-/ready", strconv.Itoa(http.StatusServiceUnavailable))))) - handler.SetReady(false) + handler.SetReady(NotReady) for range [2]int{} { code = getReady() require.Equal(t, http.StatusServiceUnavailable, code) @@ -537,7 +537,7 @@ func TestAgentAPIEndPoints(t *testing.T) { opts.Flags = map[string]string{} webHandler := New(nil, opts) - webHandler.SetReady(true) + webHandler.SetReady(Ready) webHandler.config = &config.Config{} webHandler.notifier = ¬ifier.Manager{} l, err := webHandler.Listeners() @@ -692,7 +692,7 @@ func TestMultipleListenAddresses(t *testing.T) { time.Sleep(5 * time.Second) // Set to ready. 
- webHandler.SetReady(true) + webHandler.SetReady(Ready) for _, port := range []string{port1, port2} { baseURL := "http://localhost" + port From c328d5fc8820ec7fe45296cfd4de8036b58f4a3a Mon Sep 17 00:00:00 2001 From: Callum Styan Date: Tue, 17 Sep 2024 23:04:10 -0700 Subject: [PATCH 012/137] fix rwv2 build write request benchmark, also change how the memory usage (#14925) is reported for these benchmarks to more accurately reflect what's actually allocated Signed-off-by: Callum Styan --- storage/remote/queue_manager_test.go | 63 ++++++++++++---------------- 1 file changed, 27 insertions(+), 36 deletions(-) diff --git a/storage/remote/queue_manager_test.go b/storage/remote/queue_manager_test.go index 38f3fbea28..6c61a477f6 100644 --- a/storage/remote/queue_manager_test.go +++ b/storage/remote/queue_manager_test.go @@ -1859,13 +1859,6 @@ func BenchmarkBuildWriteRequest(b *testing.B) { } pBuf := proto.NewBuffer(nil) - // Warmup buffers - for i := 0; i < 10; i++ { - populateTimeSeries(batch, seriesBuff, true, true) - buildWriteRequest(noopLogger, seriesBuff, nil, pBuf, &buff, nil, "snappy") - } - - b.ResetTimer() totalSize := 0 for i := 0; i < b.N; i++ { populateTimeSeries(batch, seriesBuff, true, true) @@ -1897,45 +1890,43 @@ func BenchmarkBuildWriteRequest(b *testing.B) { func BenchmarkBuildV2WriteRequest(b *testing.B) { noopLogger := log.NewNopLogger() - type testcase struct { - batch []timeSeries - } - testCases := []testcase{ - {createDummyTimeSeries(2)}, - {createDummyTimeSeries(10)}, - {createDummyTimeSeries(100)}, - } - for _, tc := range testCases { + bench := func(b *testing.B, batch []timeSeries) { symbolTable := writev2.NewSymbolTable() buff := make([]byte, 0) - seriesBuff := make([]writev2.TimeSeries, len(tc.batch)) + seriesBuff := make([]writev2.TimeSeries, len(batch)) for i := range seriesBuff { seriesBuff[i].Samples = []writev2.Sample{{}} seriesBuff[i].Exemplars = []writev2.Exemplar{{}} } pBuf := []byte{} - // Warmup buffers - for i := 0; i < 10; i++ { 
- populateV2TimeSeries(&symbolTable, tc.batch, seriesBuff, true, true) - buildV2WriteRequest(noopLogger, seriesBuff, symbolTable.Symbols(), &pBuf, &buff, nil, "snappy") - } - - b.Run(fmt.Sprintf("%d-instances", len(tc.batch)), func(b *testing.B) { - totalSize := 0 - for j := 0; j < b.N; j++ { - populateV2TimeSeries(&symbolTable, tc.batch, seriesBuff, true, true) - b.ResetTimer() - req, _, _, err := buildV2WriteRequest(noopLogger, seriesBuff, symbolTable.Symbols(), &pBuf, &buff, nil, "snappy") - if err != nil { - b.Fatal(err) - } - symbolTable.Reset() - totalSize += len(req) - b.ReportMetric(float64(totalSize)/float64(b.N), "compressedSize/op") + totalSize := 0 + for i := 0; i < b.N; i++ { + populateV2TimeSeries(&symbolTable, batch, seriesBuff, true, true) + req, _, _, err := buildV2WriteRequest(noopLogger, seriesBuff, symbolTable.Symbols(), &pBuf, &buff, nil, "snappy") + if err != nil { + b.Fatal(err) } - }) + totalSize += len(req) + b.ReportMetric(float64(totalSize)/float64(b.N), "compressedSize/op") + } } + + twoBatch := createDummyTimeSeries(2) + tenBatch := createDummyTimeSeries(10) + hundredBatch := createDummyTimeSeries(100) + + b.Run("2 instances", func(b *testing.B) { + bench(b, twoBatch) + }) + + b.Run("10 instances", func(b *testing.B) { + bench(b, tenBatch) + }) + + b.Run("1k instances", func(b *testing.B) { + bench(b, hundredBatch) + }) } func TestDropOldTimeSeries(t *testing.T) { From 69619990f8570b63d34758aae30496b555b517b7 Mon Sep 17 00:00:00 2001 From: Augustin Husson Date: Wed, 18 Sep 2024 11:53:09 +0200 Subject: [PATCH 013/137] UI/PromQL: autocomplete topk like aggregation function parameters Signed-off-by: Augustin Husson --- .../src/complete/hybrid.test.ts | 20 ++++++- .../codemirror-promql/src/complete/hybrid.ts | 55 +++++++++++++++++-- 2 files changed, 69 insertions(+), 6 deletions(-) diff --git a/web/ui/module/codemirror-promql/src/complete/hybrid.test.ts b/web/ui/module/codemirror-promql/src/complete/hybrid.test.ts index 
02d2e99f52..4728f18228 100644 --- a/web/ui/module/codemirror-promql/src/complete/hybrid.test.ts +++ b/web/ui/module/codemirror-promql/src/complete/hybrid.test.ts @@ -583,12 +583,30 @@ describe('analyzeCompletion test', () => { pos: 5, expectedContext: [{ kind: ContextKind.AtModifiers }], }, + { + title: 'autocomplete topk params', + expr: 'topk()', + pos: 5, + expectedContext: [{ kind: ContextKind.Number }], + }, + { + title: 'autocomplete topk params 2', + expr: 'topk(inf,)', + pos: 9, + expectedContext: [{ kind: ContextKind.MetricName, metricName: '' }, { kind: ContextKind.Function }, { kind: ContextKind.Aggregation }], + }, + { + title: 'autocomplete topk params 3', + expr: 'topk(inf,r)', + pos: 10, + expectedContext: [{ kind: ContextKind.MetricName, metricName: 'r' }, { kind: ContextKind.Function }, { kind: ContextKind.Aggregation }], + }, ]; testCases.forEach((value) => { it(value.title, () => { const state = createEditorState(value.expr); const node = syntaxTree(state).resolve(value.pos, -1); - const result = analyzeCompletion(state, node); + const result = analyzeCompletion(state, node, value.pos); expect(value.expectedContext).toEqual(result); }); }); diff --git a/web/ui/module/codemirror-promql/src/complete/hybrid.ts b/web/ui/module/codemirror-promql/src/complete/hybrid.ts index 28fef816d2..6018b58743 100644 --- a/web/ui/module/codemirror-promql/src/complete/hybrid.ts +++ b/web/ui/module/codemirror-promql/src/complete/hybrid.ts @@ -54,6 +54,12 @@ import { QuotedLabelName, NumberDurationLiteralInDurationContext, NumberDurationLiteral, + AggregateOp, + Topk, + Bottomk, + LimitK, + LimitRatio, + CountValues, } from '@prometheus-io/lezer-promql'; import { Completion, CompletionContext, CompletionResult } from '@codemirror/autocomplete'; import { EditorState } from '@codemirror/state'; @@ -185,7 +191,7 @@ export function computeStartCompletePosition(state: EditorState, node: SyntaxNod if (node.type.id === LabelMatchers || node.type.id === GroupingLabels) { 
start = computeStartCompleteLabelPositionInLabelMatcherOrInGroupingLabel(node, pos); } else if ( - node.type.id === FunctionCallBody || + (node.type.id === FunctionCallBody && node.firstChild === null) || (node.type.id === StringLiteral && (node.parent?.type.id === UnquotedLabelMatcher || node.parent?.type.id === QuotedLabelMatcher)) ) { // When the cursor is between bracket, quote, we need to increment the starting position to avoid to consider the open bracket/ first string. @@ -198,6 +204,7 @@ export function computeStartCompletePosition(state: EditorState, node: SyntaxNod // So we have to analyze the string about the current node to see if the duration unit is already present or not. (node.type.id === NumberDurationLiteralInDurationContext && !durationTerms.map((v) => v.label).includes(currentText[currentText.length - 1])) || (node.type.id === NumberDurationLiteral && node.parent?.type.id === 0 && node.parent.parent?.type.id === SubqueryExpr) || + (node.type.id === FunctionCallBody && isItATopKLikeAggregationFunc(node) && node.firstChild !== null) || (node.type.id === 0 && (node.parent?.type.id === OffsetExpr || node.parent?.type.id === MatrixSelector || @@ -208,10 +215,28 @@ export function computeStartCompletePosition(state: EditorState, node: SyntaxNod return start; } +function isItATopKLikeAggregationFunc(functionCallBody: SyntaxNode): boolean { + const prevSibling = functionCallBody.prevSibling; + if (prevSibling !== null && prevSibling.type.id === AggregateOp) { + const aggregationOpType = prevSibling.firstChild; + if ( + aggregationOpType !== null && + (aggregationOpType.type.id == Topk || + aggregationOpType.type.id === Bottomk || + aggregationOpType.type.id === LimitK || + aggregationOpType.type.id === LimitRatio || + aggregationOpType.type.id === CountValues) + ) { + return true; + } + } + return false; +} + // analyzeCompletion is going to determinate what should be autocompleted. 
// The value of the autocompletion is then calculate by the function buildCompletion. // Note: this method is exported for testing purpose only. Do not use it directly. -export function analyzeCompletion(state: EditorState, node: SyntaxNode): Context[] { +export function analyzeCompletion(state: EditorState, node: SyntaxNode, pos: number): Context[] { const result: Context[] = []; switch (node.type.id) { case 0: // 0 is the id of the error node @@ -330,7 +355,7 @@ export function analyzeCompletion(state: EditorState, node: SyntaxNode): Context } // now we have to know if we have two Expr in the direct children of the `parent` const containExprTwice = containsChild(parent, 'Expr', 'Expr'); - if (containExprTwice) { + if (containExprTwice && parent.type.id !== FunctionCallBody) { if (parent.type.id === BinaryExpr && !containsAtLeastOneChild(parent, 0)) { // We are likely in the case 1 or 5 result.push( @@ -460,7 +485,23 @@ export function analyzeCompletion(state: EditorState, node: SyntaxNode): Context result.push({ kind: ContextKind.Duration }); break; case FunctionCallBody: - // In this case we are in the given situation: + // For aggregation function such as Topk, the first parameter is a number. + // The second one is an expression. + // When moving to the second parameter, the node is an error node. + // Unfortunately, as a current node, codemirror doesn't give us the error node but instead the FunctionCallBody + // The tree looks like that: PromQL(AggregateExpr(AggregateOp(Topk),FunctionCallBody(NumberDurationLiteral,⚠))) + // So, we need to figure out if the cursor is on the first parameter or in the second. 
+ if (isItATopKLikeAggregationFunc(node)) { + if (node.firstChild === null || (node.firstChild.from <= pos && node.firstChild.to >= pos)) { + // it means the FunctionCallBody has no child, which means we are autocompleting the first parameter + result.push({ kind: ContextKind.Number }); + break; + } + // at this point we are necessary autocompleting the second parameter + result.push({ kind: ContextKind.MetricName, metricName: '' }, { kind: ContextKind.Function }, { kind: ContextKind.Aggregation }); + break; + } + // In all other cases, we are in the given situation: // sum() or in rate() // with the cursor between the bracket. So we can autocomplete the metric, the function and the aggregation. result.push({ kind: ContextKind.MetricName, metricName: '' }, { kind: ContextKind.Function }, { kind: ContextKind.Aggregation }); @@ -516,7 +557,11 @@ export class HybridComplete implements CompleteStrategy { promQL(context: CompletionContext): Promise | CompletionResult | null { const { state, pos } = context; const tree = syntaxTree(state).resolve(pos, -1); - const contexts = analyzeCompletion(state, tree); + // The lines above can help you to print the current lezer tree. + // It's useful when you are trying to understand why it doesn't autocomplete. + // console.log(syntaxTree(state).topNode.toString()); + // console.log(`current node: ${tree.type.name}`); + const contexts = analyzeCompletion(state, tree, pos); let asyncResult: Promise = Promise.resolve([]); let completeSnippet = false; let span = true; From b6107cc888f254cc12d3cafffc5454d483097cd5 Mon Sep 17 00:00:00 2001 From: Joshua Hesketh Date: Wed, 18 Sep 2024 20:21:25 +1000 Subject: [PATCH 014/137] Make rate possible non-counter annotation consistent (#14910) * Make rate possible non-counter annotation consistent Previously a PossibleNonCounterInfo annotation would be left in cases where a range-vector selects 1 float data point, even if no more points are selected in order to calculate a rate. 
This change ensures an output float exists before emitting such an annotation. This fixes an inconsistency where a series with mixed data (ie, a float and a native histogram) would emit an annotation without any points. For example, ``` load 1m series{label="a"} 1 {{schema:1 sum:10 count:5 buckets:[1 2 3]}} eval instant at 1m rate(series[1m1s]) ``` Would have a PossibleNonCounterInfo annotation. Wheras ``` load 1m series{label="a"} {{schema:1 sum:10 count:5 buckets:[1 2 3]}} {{schema:1 sum:15 count:10 buckets:[1 2 3]}} eval instant at 1m rate(series[1m1s]) ``` Would not. --------- Signed-off-by: Joshua Hesketh --- CHANGELOG.md | 4 +++ promql/engine.go | 5 ++- promql/engine_test.go | 73 +++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 79 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 49aeeb50ef..a60dea1698 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,9 @@ # Changelog +## unreleased + +* [BUGFIX] PromQL: Only return "possible non-counter" annotation when `rate` returns points. #14910 + ## 3.0.0-beta.0 / 2024-09-05 Release 3.0.0-beta.0 includes new features such as a brand new UI and UTF-8 support enabled by default. As a new major version, several breaking changes are introduced. The breaking changes are mainly around the removal of deprecated feature flags and CLI arguments, and the full list can be found below. Most users should be able to try this release out of the box without any configuration changes. 
diff --git a/promql/engine.go b/promql/engine.go index 8653436819..983e016666 100644 --- a/promql/engine.go +++ b/promql/engine.go @@ -1742,9 +1742,8 @@ func (ev *evaluator) eval(ctx context.Context, expr parser.Expr) (parser.Value, ev.samplesStats.UpdatePeak(ev.currentSamples) if e.Func.Name == "rate" || e.Func.Name == "increase" { - samples := inMatrix[0] - metricName := samples.Metric.Get(labels.MetricName) - if metricName != "" && len(samples.Floats) > 0 && + metricName := inMatrix[0].Metric.Get(labels.MetricName) + if metricName != "" && len(ss.Floats) > 0 && !strings.HasSuffix(metricName, "_total") && !strings.HasSuffix(metricName, "_sum") && !strings.HasSuffix(metricName, "_count") && diff --git a/promql/engine_test.go b/promql/engine_test.go index 94fa8d0b49..e4171eb5bd 100644 --- a/promql/engine_test.go +++ b/promql/engine_test.go @@ -19,6 +19,7 @@ import ( "fmt" "sort" "strconv" + "strings" "sync" "testing" "time" @@ -3708,3 +3709,75 @@ histogram {{sum:4 count:4 buckets:[2 2]}} {{sum:6 count:6 buckets:[3 3]}} {{sum: }, }) } + +func TestRateAnnotations(t *testing.T) { + testCases := map[string]struct { + data string + expr string + expectedWarningAnnotations []string + expectedInfoAnnotations []string + }{ + "info annotation when two samples are selected": { + data: ` + series 1 2 + `, + expr: "rate(series[1m1s])", + expectedWarningAnnotations: []string{}, + expectedInfoAnnotations: []string{ + `PromQL info: metric might not be a counter, name does not end in _total/_sum/_count/_bucket: "series" (1:6)`, + }, + }, + "no info annotations when no samples": { + data: ` + series + `, + expr: "rate(series[1m1s])", + expectedWarningAnnotations: []string{}, + expectedInfoAnnotations: []string{}, + }, + "no info annotations when selecting one sample": { + data: ` + series 1 2 + `, + expr: "rate(series[10s])", + expectedWarningAnnotations: []string{}, + expectedInfoAnnotations: []string{}, + }, + "no info annotations when no samples due to mixed data types": { + 
data: ` + series{label="a"} 1 {{schema:1 sum:15 count:10 buckets:[1 2 3]}} + `, + expr: "rate(series[1m1s])", + expectedWarningAnnotations: []string{ + `PromQL warning: encountered a mix of histograms and floats for metric name "series" (1:6)`, + }, + expectedInfoAnnotations: []string{}, + }, + "no info annotations when selecting two native histograms": { + data: ` + series{label="a"} {{schema:1 sum:10 count:5 buckets:[1 2 3]}} {{schema:1 sum:15 count:10 buckets:[1 2 3]}} + `, + expr: "rate(series[1m1s])", + expectedWarningAnnotations: []string{}, + expectedInfoAnnotations: []string{}, + }, + } + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + store := promqltest.LoadedStorage(t, "load 1m\n"+strings.TrimSpace(testCase.data)) + t.Cleanup(func() { _ = store.Close() }) + + engine := newTestEngine(t) + query, err := engine.NewInstantQuery(context.Background(), store, nil, testCase.expr, timestamp.Time(0).Add(1*time.Minute)) + require.NoError(t, err) + t.Cleanup(query.Close) + + res := query.Exec(context.Background()) + require.NoError(t, res.Err) + + warnings, infos := res.Warnings.AsStrings(testCase.expr, 0, 0) + testutil.RequireEqual(t, testCase.expectedWarningAnnotations, warnings) + testutil.RequireEqual(t, testCase.expectedInfoAnnotations, infos) + }) + } +} From b65f1b65605882cbbc21552ad8f280b3bc3abfef Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Mon, 16 Sep 2024 08:48:49 +0100 Subject: [PATCH 015/137] TSDB: Improve xor-chunk benchmarks Benchmarks must do the same work N times. Run 3 cases, where the values are constant, vary a bit, and vary a lot. Also aim for 120 samples same as TSDB default. 
Signed-off-by: Bryan Boreham --- tsdb/chunkenc/chunk_test.go | 50 +++++++++++++++++++++---------------- 1 file changed, 29 insertions(+), 21 deletions(-) diff --git a/tsdb/chunkenc/chunk_test.go b/tsdb/chunkenc/chunk_test.go index b72492a08b..6319db6eec 100644 --- a/tsdb/chunkenc/chunk_test.go +++ b/tsdb/chunkenc/chunk_test.go @@ -251,54 +251,62 @@ func benchmarkIterator(b *testing.B, newChunk func() Chunk) { } } +func newXORChunk() Chunk { + return NewXORChunk() +} + func BenchmarkXORIterator(b *testing.B) { - benchmarkIterator(b, func() Chunk { - return NewXORChunk() - }) + benchmarkIterator(b, newXORChunk) } func BenchmarkXORAppender(b *testing.B) { - benchmarkAppender(b, func() Chunk { - return NewXORChunk() + r := rand.New(rand.NewSource(1)) + b.Run("constant", func(b *testing.B) { + benchmarkAppender(b, func() (int64, float64) { + return 1000, 0 + }, newXORChunk) + }) + b.Run("random steps", func(b *testing.B) { + benchmarkAppender(b, func() (int64, float64) { + return int64(r.Intn(100) - 50 + 15000), // 15 seconds +- up to 100ms of jitter. + float64(r.Intn(100) - 50) // Varying from -50 to +50 in 100 discrete steps. + }, newXORChunk) + }) + b.Run("random 0-1", func(b *testing.B) { + benchmarkAppender(b, func() (int64, float64) { + return int64(r.Intn(100) - 50 + 15000), // 15 seconds +- up to 100ms of jitter. + r.Float64() // Random between 0 and 1.0. + }, newXORChunk) }) } -func benchmarkAppender(b *testing.B, newChunk func() Chunk) { +func benchmarkAppender(b *testing.B, deltas func() (int64, float64), newChunk func() Chunk) { var ( t = int64(1234123324) v = 1243535.123 ) + const nSamples = 120 // Same as tsdb.DefaultSamplesPerChunk. 
var exp []pair - for i := 0; i < b.N; i++ { - // t += int64(rand.Intn(10000) + 1) - t += int64(1000) - // v = rand.Float64() - v += float64(100) + for i := 0; i < nSamples; i++ { + dt, dv := deltas() + t += dt + v += dv exp = append(exp, pair{t: t, v: v}) } b.ReportAllocs() b.ResetTimer() - var chunks []Chunk - for i := 0; i < b.N; { + for i := 0; i < b.N; i++ { c := newChunk() a, err := c.Appender() if err != nil { b.Fatalf("get appender: %s", err) } - j := 0 for _, p := range exp { - if j > 250 { - break - } a.Append(p.t, p.v) - i++ - j++ } - chunks = append(chunks, c) } - fmt.Println("num", b.N, "created chunks", len(chunks)) } From b9a9689aae1d366dc0071131b7bbe6d91fb7d2aa Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Wed, 18 Sep 2024 10:35:29 +0100 Subject: [PATCH 016/137] [PERF] Chunk encoding: simplify writeByte Rather than append a zero then set the value at that position, append the value. Signed-off-by: Bryan Boreham --- tsdb/chunkenc/bstream.go | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/tsdb/chunkenc/bstream.go b/tsdb/chunkenc/bstream.go index f9668c68c2..6e01798f72 100644 --- a/tsdb/chunkenc/bstream.go +++ b/tsdb/chunkenc/bstream.go @@ -95,10 +95,8 @@ func (b *bstream) writeByte(byt byte) { // Complete the last byte with the leftmost b.count bits from byt. b.stream[i] |= byt >> (8 - b.count) - b.stream = append(b.stream, 0) - i++ // Write the remainder, if any. - b.stream[i] = byt << b.count + b.stream = append(b.stream, byt< Date: Wed, 18 Sep 2024 10:33:44 +0100 Subject: [PATCH 017/137] [PERF] Chunk encoding: combine timestamp writes Instead of a 2-bit write followed by a 14-bit write, do two 8-bit writes, which goes much faster since it avoids looping. 
Signed-off-by: Bryan Boreham --- tsdb/chunkenc/xor.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tsdb/chunkenc/xor.go b/tsdb/chunkenc/xor.go index ec5db39ad4..ac75a5994b 100644 --- a/tsdb/chunkenc/xor.go +++ b/tsdb/chunkenc/xor.go @@ -191,8 +191,8 @@ func (a *xorAppender) Append(t int64, v float64) { case dod == 0: a.b.writeBit(zero) case bitRange(dod, 14): - a.b.writeBits(0b10, 2) - a.b.writeBits(uint64(dod), 14) + a.b.writeByte(0b10<<6 | (uint8(dod>>8) & (1<<6 - 1))) // 0b10 size code combined with 6 bits of dod. + a.b.writeByte(uint8(dod)) // Bottom 8 bits of dod. case bitRange(dod, 17): a.b.writeBits(0b110, 3) a.b.writeBits(uint64(dod), 17) From 15cea39136bbdbcae3016b84bd646a3220bd7580 Mon Sep 17 00:00:00 2001 From: Jan Fajerski Date: Mon, 16 Sep 2024 17:45:32 +0200 Subject: [PATCH 018/137] promql: put holt_winters behind experimental feature flag Signed-off-by: Jan Fajerski --- docs/querying/functions.md | 2 ++ promql/parser/functions.go | 7 ++++--- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/docs/querying/functions.md b/docs/querying/functions.md index e13628c5c5..54d0d05ddc 100644 --- a/docs/querying/functions.md +++ b/docs/querying/functions.md @@ -382,6 +382,8 @@ variance of observations in a native histogram. ## `holt_winters()` +**This function has to be enabled via the [feature flag](../feature_flags.md#experimental-promql-functions) `--enable-feature=promql-experimental-functions`.** + `holt_winters(v range-vector, sf scalar, tf scalar)` produces a smoothed value for time series based on the range in `v`. The lower the smoothing factor `sf`, the more importance is given to old data. 
The higher the trend factor `tf`, the diff --git a/promql/parser/functions.go b/promql/parser/functions.go index 99b41321fe..4fe3c80935 100644 --- a/promql/parser/functions.go +++ b/promql/parser/functions.go @@ -203,9 +203,10 @@ var Functions = map[string]*Function{ ReturnType: ValueTypeVector, }, "holt_winters": { - Name: "holt_winters", - ArgTypes: []ValueType{ValueTypeMatrix, ValueTypeScalar, ValueTypeScalar}, - ReturnType: ValueTypeVector, + Name: "holt_winters", + ArgTypes: []ValueType{ValueTypeMatrix, ValueTypeScalar, ValueTypeScalar}, + ReturnType: ValueTypeVector, + Experimental: true, }, "hour": { Name: "hour", From e8c2d916eca4f4cd516cbf27fd24c719140cc2b8 Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Wed, 18 Sep 2024 13:40:44 +0100 Subject: [PATCH 019/137] lint Signed-off-by: Bryan Boreham --- tsdb/chunkenc/chunk_test.go | 1 - 1 file changed, 1 deletion(-) diff --git a/tsdb/chunkenc/chunk_test.go b/tsdb/chunkenc/chunk_test.go index 6319db6eec..e6b89be401 100644 --- a/tsdb/chunkenc/chunk_test.go +++ b/tsdb/chunkenc/chunk_test.go @@ -308,5 +308,4 @@ func benchmarkAppender(b *testing.B, deltas func() (int64, float64), newChunk fu a.Append(p.t, p.v) } } - } From 0d22a91267e5520cdf130517cbcfbcb5b6088318 Mon Sep 17 00:00:00 2001 From: George Krajcsovits Date: Mon, 9 Sep 2024 13:41:08 +0200 Subject: [PATCH 020/137] Merge pull request #14874 from krajorama/fix-panic-in-ooo-query2 BUGFIX: TSDB: panic in chunk querier --- tsdb/head_test.go | 64 +++++++++++++++++++++++++++++++++++-------- tsdb/ooo_head_read.go | 9 ++++++ 2 files changed, 61 insertions(+), 12 deletions(-) diff --git a/tsdb/head_test.go b/tsdb/head_test.go index c338ddaddc..7b5349cfca 100644 --- a/tsdb/head_test.go +++ b/tsdb/head_test.go @@ -3493,6 +3493,56 @@ func TestWaitForPendingReadersInTimeRange(t *testing.T) { } func TestQueryOOOHeadDuringTruncate(t *testing.T) { + testQueryOOOHeadDuringTruncate(t, + func(db *DB, minT, maxT int64) (storage.LabelQuerier, error) { + return db.Querier(minT, 
maxT) + }, + func(t *testing.T, lq storage.LabelQuerier, minT, _ int64) { + // Samples + q, ok := lq.(storage.Querier) + require.True(t, ok) + ss := q.Select(context.Background(), false, nil, labels.MustNewMatcher(labels.MatchEqual, "a", "b")) + require.True(t, ss.Next()) + s := ss.At() + require.False(t, ss.Next()) // One series. + it := s.Iterator(nil) + require.NotEqual(t, chunkenc.ValNone, it.Next()) // Has some data. + require.Equal(t, minT, it.AtT()) // It is an in-order sample. + require.NotEqual(t, chunkenc.ValNone, it.Next()) // Has some data. + require.Equal(t, minT+50, it.AtT()) // it is an out-of-order sample. + require.NoError(t, it.Err()) + }, + ) +} + +func TestChunkQueryOOOHeadDuringTruncate(t *testing.T) { + testQueryOOOHeadDuringTruncate(t, + func(db *DB, minT, maxT int64) (storage.LabelQuerier, error) { + return db.ChunkQuerier(minT, maxT) + }, + func(t *testing.T, lq storage.LabelQuerier, minT, _ int64) { + // Chunks + q, ok := lq.(storage.ChunkQuerier) + require.True(t, ok) + ss := q.Select(context.Background(), false, nil, labels.MustNewMatcher(labels.MatchEqual, "a", "b")) + require.True(t, ss.Next()) + s := ss.At() + require.False(t, ss.Next()) // One series. + metaIt := s.Iterator(nil) + require.True(t, metaIt.Next()) + meta := metaIt.At() + // Samples + it := meta.Chunk.Iterator(nil) + require.NotEqual(t, chunkenc.ValNone, it.Next()) // Has some data. + require.Equal(t, minT, it.AtT()) // It is an in-order sample. + require.NotEqual(t, chunkenc.ValNone, it.Next()) // Has some data. + require.Equal(t, minT+50, it.AtT()) // it is an out-of-order sample. + require.NoError(t, it.Err()) + }, + ) +} + +func testQueryOOOHeadDuringTruncate(t *testing.T, makeQuerier func(db *DB, minT, maxT int64) (storage.LabelQuerier, error), verify func(t *testing.T, q storage.LabelQuerier, minT, maxT int64)) { const maxT int64 = 6000 dir := t.TempDir() @@ -3545,7 +3595,7 @@ func TestQueryOOOHeadDuringTruncate(t *testing.T) { // Wait for the compaction to start. 
<-allowQueryToStart - q, err := db.Querier(1500, 2500) + q, err := makeQuerier(db, 1500, 2500) require.NoError(t, err) queryStarted <- struct{}{} // Unblock the compaction. ctx := context.Background() @@ -3562,17 +3612,7 @@ func TestQueryOOOHeadDuringTruncate(t *testing.T) { require.Empty(t, annots) require.Equal(t, []string{"b"}, res) - // Samples - ss := q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchEqual, "a", "b")) - require.True(t, ss.Next()) - s := ss.At() - require.False(t, ss.Next()) // One series. - it := s.Iterator(nil) - require.NotEqual(t, chunkenc.ValNone, it.Next()) // Has some data. - require.Equal(t, int64(1500), it.AtT()) // It is an in-order sample. - require.NotEqual(t, chunkenc.ValNone, it.Next()) // Has some data. - require.Equal(t, int64(1550), it.AtT()) // it is an out-of-order sample. - require.NoError(t, it.Err()) + verify(t, q, 1500, 2500) require.NoError(t, q.Close()) // Cannot be deferred as the compaction waits for queries to close before finishing. diff --git a/tsdb/ooo_head_read.go b/tsdb/ooo_head_read.go index a3c959bc43..66ae93325d 100644 --- a/tsdb/ooo_head_read.go +++ b/tsdb/ooo_head_read.go @@ -586,15 +586,24 @@ func NewHeadAndOOOChunkQuerier(mint, maxt int64, head *Head, oooIsoState *oooIso } func (q *HeadAndOOOChunkQuerier) LabelValues(ctx context.Context, name string, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { + if q.querier == nil { + return nil, nil, nil + } return q.querier.LabelValues(ctx, name, hints, matchers...) } func (q *HeadAndOOOChunkQuerier) LabelNames(ctx context.Context, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { + if q.querier == nil { + return nil, nil, nil + } return q.querier.LabelNames(ctx, hints, matchers...) 
} func (q *HeadAndOOOChunkQuerier) Close() error { q.chunkr.Close() + if q.querier == nil { + return nil + } return q.querier.Close() } From 5e68360dc8d7a40540794ad7f13aacf4ae23b3c8 Mon Sep 17 00:00:00 2001 From: Julius Volz Date: Tue, 10 Sep 2024 09:06:51 +0200 Subject: [PATCH 021/137] Fix error flood by downgrading OTel dependencies (#14884) Fixes #14859, although we'll have to see about a long-term fix. Hopefully it'll be fixed upstream with a follow-up version. Signed-off-by: Julius Volz --- go.mod | 4 ++-- go.sum | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index 845e3277b8..4a2dd1c779 100644 --- a/go.mod +++ b/go.mod @@ -64,7 +64,7 @@ require ( github.com/vultr/govultr/v2 v2.17.2 go.opentelemetry.io/collector/pdata v1.14.1 go.opentelemetry.io/collector/semconv v0.108.1 - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 go.opentelemetry.io/otel v1.29.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.29.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.29.0 @@ -81,7 +81,7 @@ require ( golang.org/x/text v0.18.0 golang.org/x/time v0.6.0 golang.org/x/tools v0.24.0 - google.golang.org/api v0.196.0 + google.golang.org/api v0.195.0 google.golang.org/genproto/googleapis/api v0.0.0-20240827150818-7e3bb234dfed google.golang.org/grpc v1.66.0 google.golang.org/protobuf v1.34.2 diff --git a/go.sum b/go.sum index edb5b650bd..4fc4f93bd8 100644 --- a/go.sum +++ b/go.sum @@ -736,8 +736,8 @@ go.opentelemetry.io/collector/pdata v1.14.1 h1:wXZjtQA7Vy5HFqco+yA95ENyMQU5heBB1 go.opentelemetry.io/collector/pdata v1.14.1/go.mod h1:z1dTjwwtcoXxZx2/nkHysjxMeaxe9pEmYTEr4SMNIx8= go.opentelemetry.io/collector/semconv v0.108.1 h1:Txk9tauUnamZaxS5vlf1O0uZ4VD6nioRBR0nX8L/fU4= go.opentelemetry.io/collector/semconv v0.108.1/go.mod h1:zCJ5njhWpejR+A40kiEoeFm1xq1uzyZwMnRNX6/D82A= 
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 h1:TT4fX+nBOA/+LUkobKGW1ydGcn+G3vRw9+g5HwCphpk= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0/go.mod h1:L7UH0GbB0p47T4Rri3uHjbpCFYrVrwc1I25QhNPiGK8= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg= go.opentelemetry.io/otel v1.29.0 h1:PdomN/Al4q/lN6iBJEN3AwPvUiHPMlt93c8bqTG5Llw= go.opentelemetry.io/otel v1.29.0/go.mod h1:N/WtXPs1CNCUEx+Agz5uouwCba+i+bJGFicT8SR4NP8= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.29.0 h1:dIIDULZJpgdiHz5tXrTgKIMLkus6jEFa7x5SOKcyR7E= @@ -1056,8 +1056,8 @@ google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/ google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.196.0 h1:k/RafYqebaIJBO3+SMnfEGtFVlvp5vSgqTUF54UN/zg= -google.golang.org/api v0.196.0/go.mod h1:g9IL21uGkYgvQ5BZg6BAtoGJQIm8r6EgaAbpNey5wBE= +google.golang.org/api v0.195.0 h1:Ude4N8FvTKnnQJHU48RFI40jOBgIrL8Zqr3/QeST6yU= +google.golang.org/api v0.195.0/go.mod h1:DOGRWuv3P8TU8Lnz7uQc4hyNqrBpMtD9ppW3wBJurgc= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= From 5ccb0694146d666e49068e82c159300fedef6b76 Mon Sep 17 00:00:00 2001 From: Ganesh Vernekar Date: Wed, 18 Sep 2024 10:26:31 -0400 Subject: [PATCH 022/137] Backward compatibility with upcoming index v3 Signed-off-by: Ganesh Vernekar --- tsdb/index/index.go | 28 
++++++++++++++++------------ 1 file changed, 16 insertions(+), 12 deletions(-) diff --git a/tsdb/index/index.go b/tsdb/index/index.go index 0e0e353719..db0b9b88b8 100644 --- a/tsdb/index/index.go +++ b/tsdb/index/index.go @@ -43,10 +43,12 @@ const ( // HeaderLen represents number of bytes reserved of index for header. HeaderLen = 5 - // FormatV1 represents 1 version of index. + // FormatV1 represents version 1 of index. FormatV1 = 1 - // FormatV2 represents 2 version of index. + // FormatV2 represents version 2 of index. FormatV2 = 2 + // FormatV3 represents version 3 of index. + FormatV3 = 3 indexFilename = "index" @@ -1193,7 +1195,9 @@ func newReader(b ByteSlice, c io.Closer) (*Reader, error) { } r.version = int(r.b.Range(4, 5)[0]) - if r.version != FormatV1 && r.version != FormatV2 { + switch r.version { + case FormatV1, FormatV2, FormatV3: + default: return nil, fmt.Errorf("unknown index file version %d", r.version) } @@ -1351,7 +1355,9 @@ func (s Symbols) Lookup(o uint32) (string, error) { B: s.bs.Range(0, s.bs.Len()), } - if s.version == FormatV2 { + if s.version == FormatV1 { + d.Skip(int(o)) + } else { if int(o) >= s.seen { return "", fmt.Errorf("unknown symbol offset %d", o) } @@ -1360,8 +1366,6 @@ func (s Symbols) Lookup(o uint32) (string, error) { for i := o - (o / symbolFactor * symbolFactor); i > 0; i-- { d.UvarintBytes() } - } else { - d.Skip(int(o)) } sym := d.UvarintStr() if d.Err() != nil { @@ -1407,10 +1411,10 @@ func (s Symbols) ReverseLookup(sym string) (uint32, error) { if lastSymbol != sym { return 0, fmt.Errorf("unknown symbol %q", sym) } - if s.version == FormatV2 { - return uint32(res), nil + if s.version == FormatV1 { + return uint32(s.bs.Len() - lastLen), nil } - return uint32(s.bs.Len() - lastLen), nil + return uint32(res), nil } func (s Symbols) Size() int { @@ -1569,7 +1573,7 @@ func (r *Reader) LabelNamesFor(ctx context.Context, postings Postings) ([]string offset := id // In version 2 series IDs are no longer exact references but 
series are 16-byte padded // and the ID is the multiple of 16 of the actual position. - if r.version == FormatV2 { + if r.version != FormatV1 { offset = id * seriesByteAlign } @@ -1608,7 +1612,7 @@ func (r *Reader) LabelValueFor(ctx context.Context, id storage.SeriesRef, label offset := id // In version 2 series IDs are no longer exact references but series are 16-byte padded // and the ID is the multiple of 16 of the actual position. - if r.version == FormatV2 { + if r.version != FormatV1 { offset = id * seriesByteAlign } d := encoding.NewDecbufUvarintAt(r.b, int(offset), castagnoliTable) @@ -1634,7 +1638,7 @@ func (r *Reader) Series(id storage.SeriesRef, builder *labels.ScratchBuilder, ch offset := id // In version 2 series IDs are no longer exact references but series are 16-byte padded // and the ID is the multiple of 16 of the actual position. - if r.version == FormatV2 { + if r.version != FormatV1 { offset = id * seriesByteAlign } d := encoding.NewDecbufUvarintAt(r.b, int(offset), castagnoliTable) From 546f78000685db1a7635c49c487601aff8c858ab Mon Sep 17 00:00:00 2001 From: Julien Date: Thu, 19 Sep 2024 11:50:25 +0200 Subject: [PATCH 023/137] UI: Disallow sub-second zoom as this cause inconsistenices in the X axis in uPlot Fixes #9135 Signed-off-by: Julien --- web/ui/mantine-ui/src/pages/query/uPlotChartHelpers.ts | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/web/ui/mantine-ui/src/pages/query/uPlotChartHelpers.ts b/web/ui/mantine-ui/src/pages/query/uPlotChartHelpers.ts index b7a81831c9..e9dad3665f 100644 --- a/web/ui/mantine-ui/src/pages/query/uPlotChartHelpers.ts +++ b/web/ui/mantine-ui/src/pages/query/uPlotChartHelpers.ts @@ -380,10 +380,11 @@ export const getUPlotOptions = ( hooks: { setSelect: [ (self: uPlot) => { - onSelectRange( - self.posToVal(self.select.left, "x"), - self.posToVal(self.select.left + self.select.width, "x") - ); + // Disallow sub-second zoom as this cause inconsistenices in the X axis in uPlot. 
+ const leftVal = self.posToVal(self.select.left, "x"); + const rightVal = Math.max(self.posToVal(self.select.left + self.select.width, "x"), leftVal + 1); + + onSelectRange(leftVal, rightVal); }, ], }, From 81b9407f15a149bb6afdfd5ccf30a8c5de9c9885 Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Tue, 17 Sep 2024 17:03:22 +0100 Subject: [PATCH 024/137] Prepare release 2.55.0-rc.0 Signed-off-by: Bryan Boreham --- CHANGELOG.md | 35 ++++++++++++++++++-- RELEASE.md | 1 + VERSION | 2 +- web/ui/module/codemirror-promql/package.json | 4 +-- web/ui/module/lezer-promql/package.json | 2 +- web/ui/package-lock.json | 14 ++++---- web/ui/package.json | 2 +- web/ui/react-app/package.json | 4 +-- 8 files changed, 47 insertions(+), 17 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index ff222790fd..e183f894b0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,10 +2,39 @@ ## unreleased -* [FEATURE] OTLP receiver: Add new option `otlp.promote_resource_attributes`, for any OTel resource attributes that should be promoted to metric labels. #14200 -* [ENHANCEMENT] OTLP receiver: Warn when encountering exponential histograms with zero count and non-zero sum. #14706 +## 2.55.0-rc.0 / 2024-09-17 + +* [FEATURE] Support UTF-8 characters in label names - feature flag `utf8-names`. #14482, #14880, #14736, #14727 +* [FEATURE] Support config reload automatically - feature flag `auto-reload-config`. #14769 +* [FEATURE] Scraping: Add the ability to set custom `http_headers` in config. #14817 +* [FEATURE] Scraping: Support feature flag `created-timestamp-zero-ingestion` in OpenMetrics. #14356, #14815 +* [FEATURE] Scraping: `scrape_failure_log_file` option to log failures to a file. #14734 +* [FEATURE] PromQL: Delay removal of `__name__` label - feature flag `promql-delayed-name-removal`. #14477 +* [FEATURE] OTLP receiver: Optional promotion of resource attributes to series labels. #14200 +* [FEATURE] API: Support multiple `--web.listen-address`. 
#14665 +* [FEATURE] Remote-Write: Support Google Cloud Monitoring authorization. #14346 +* [FEATURE] Promtool: `tsdb create-blocks` new option to add labels. #14403 +* [FEATURE] Promtool: `promtool test` adds `--junit` flag to format results. #14506 +* [ENHANCEMENT] OTLP receiver: Warn on exponential histograms with zero count and non-zero sum. #14706 * [ENHANCEMENT] OTLP receiver: Interrupt translation on context cancellation/timeout. #14612 -* [BUGFIX] tsdb/wlog.Watcher.readSegmentForGC: Only count unknown record types against record_decode_failures_total metric. #14042 +* [ENHANCEMENT] PromQL: Improve detail in distributed tracing. #14816 +* [ENHANCEMENT] PromQL: Experimental `sort_by_label` and `sort_by_label_desc` sort by all labels when label is equal. #14655 +* [ENHANCEMENT] PromQL: Clarify error message logged when Go runtime panic occurs during query evaluation. #14621 +* [ENHANCEMENT] PromQL: Use Kahan summation for better accuracy in `avg` and `avg_over_time`. #14413 +* [ENHANCEMENT] TSDB: Backward compatibility with upcoming index v3. #14934 +* [PERF] Remote-Read: Support streaming mode. #11379 +* [PERF] TSDB: Query in-order and out-of-order series together. #14354, #14693, #14714, #14831, #14874 +* [PERF] TSDB: Streamline reading of overlapping out-of-order head chunks. #14729 +* [BUGFIX] SD: Fix dropping targets (with feature flag `new-service-discovery-manager`). #13147 +* [BUGFIX] SD: Stop storing stale targets (with feature flag `new-service-discovery-manager`). #13622 +* [BUGFIX] fix(utf8): propagate validationScheme config to scraping options. #14880 +* [BUGFIX] PromQL: Experimental Native Histograms: Do not re-use spans between histograms. #14771 +* [BUGFIX] Scraping: exemplars could be dropped in protobuf scraping. #14810 +* [BUGFIX] Remote-Write: fix metadata sending for experimental Remote-Write V2. #14766 +* [BUGFIX] Remote-Write: Return 4xx not 5xx when timeseries has duplicate label. 
#14716 +* [BUGFIX] Experimental Native Histograms: many fixes for incorrect results, panics, warnings. #14598, #14611, #14609, #14575, #14513 +* [BUGFIX] TSDB: Only count unknown record types in `record_decode_failures_total` metric. #14042 +* [ENHANCEMENT] Remote-Write: Don't reshard if we haven't successfully sent a sample since last update. #14450 ## 2.54.1 / 2024-08-27 diff --git a/RELEASE.md b/RELEASE.md index 0d3f7456cd..b978a3c226 100644 --- a/RELEASE.md +++ b/RELEASE.md @@ -59,6 +59,7 @@ Release cadence of first pre-releases being cut is 6 weeks. | v2.52 | 2024-04-22 | Arthur Silva Sens (GitHub: @ArthurSens) | | v2.53 LTS | 2024-06-03 | George Krajcsovits (GitHub: @krajorama) | | v2.54 | 2024-07-17 | Bryan Boreham (GitHub: @bboreham) | +| v2.55 | 2024-09-17 | Bryan Boreham (GitHub: @bboreham) | If you are interested in volunteering please create a pull request against the [prometheus/prometheus](https://github.com/prometheus/prometheus) repository and propose yourself for the release series of your choice. 
diff --git a/VERSION b/VERSION index 3a40665f50..95a9e5028a 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.54.1 +2.55.0-rc.0 diff --git a/web/ui/module/codemirror-promql/package.json b/web/ui/module/codemirror-promql/package.json index 1e52207f30..1589781dcb 100644 --- a/web/ui/module/codemirror-promql/package.json +++ b/web/ui/module/codemirror-promql/package.json @@ -1,6 +1,6 @@ { "name": "@prometheus-io/codemirror-promql", - "version": "0.54.1", + "version": "0.55.0-rc.0", "description": "a CodeMirror mode for the PromQL language", "types": "dist/esm/index.d.ts", "module": "dist/esm/index.js", @@ -29,7 +29,7 @@ }, "homepage": "https://github.com/prometheus/prometheus/blob/main/web/ui/module/codemirror-promql/README.md", "dependencies": { - "@prometheus-io/lezer-promql": "0.54.1", + "@prometheus-io/lezer-promql": "0.55.0-rc.0", "lru-cache": "^7.18.3" }, "devDependencies": { diff --git a/web/ui/module/lezer-promql/package.json b/web/ui/module/lezer-promql/package.json index d863cd1c08..ba68d5622d 100644 --- a/web/ui/module/lezer-promql/package.json +++ b/web/ui/module/lezer-promql/package.json @@ -1,6 +1,6 @@ { "name": "@prometheus-io/lezer-promql", - "version": "0.54.1", + "version": "0.55.0-rc.0", "description": "lezer-based PromQL grammar", "main": "dist/index.cjs", "type": "module", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index d425ecc9ab..99a6516bfd 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -1,12 +1,12 @@ { "name": "prometheus-io", - "version": "0.54.1", + "version": "0.55.0-rc.0", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "prometheus-io", - "version": "0.54.1", + "version": "0.55.0-rc.0", "workspaces": [ "react-app", "module/*" @@ -30,10 +30,10 @@ }, "module/codemirror-promql": { "name": "@prometheus-io/codemirror-promql", - "version": "0.54.1", + "version": "0.55.0-rc.0", "license": "Apache-2.0", "dependencies": { - "@prometheus-io/lezer-promql": "0.54.1", + 
"@prometheus-io/lezer-promql": "0.55.0-rc.0", "lru-cache": "^7.18.3" }, "devDependencies": { @@ -69,7 +69,7 @@ }, "module/lezer-promql": { "name": "@prometheus-io/lezer-promql", - "version": "0.54.1", + "version": "0.55.0-rc.0", "license": "Apache-2.0", "devDependencies": { "@lezer/generator": "^1.7.1", @@ -19352,7 +19352,7 @@ }, "react-app": { "name": "@prometheus-io/app", - "version": "0.54.1", + "version": "0.55.0-rc.0", "dependencies": { "@codemirror/autocomplete": "^6.17.0", "@codemirror/commands": "^6.6.0", @@ -19370,7 +19370,7 @@ "@lezer/lr": "^1.4.2", "@nexucis/fuzzy": "^0.4.1", "@nexucis/kvsearch": "^0.8.1", - "@prometheus-io/codemirror-promql": "0.54.1", + "@prometheus-io/codemirror-promql": "0.55.0-rc.0", "bootstrap": "^4.6.2", "css.escape": "^1.5.1", "downshift": "^9.0.6", diff --git a/web/ui/package.json b/web/ui/package.json index f97d7098b2..2525d295c3 100644 --- a/web/ui/package.json +++ b/web/ui/package.json @@ -28,5 +28,5 @@ "ts-jest": "^29.2.2", "typescript": "^4.9.5" }, - "version": "0.54.1" + "version": "0.55.0-rc.0" } diff --git a/web/ui/react-app/package.json b/web/ui/react-app/package.json index 8f29b7c149..5cf490496c 100644 --- a/web/ui/react-app/package.json +++ b/web/ui/react-app/package.json @@ -1,6 +1,6 @@ { "name": "@prometheus-io/app", - "version": "0.54.1", + "version": "0.55.0-rc.0", "private": true, "dependencies": { "@codemirror/autocomplete": "^6.17.0", @@ -19,7 +19,7 @@ "@lezer/lr": "^1.4.2", "@nexucis/fuzzy": "^0.4.1", "@nexucis/kvsearch": "^0.8.1", - "@prometheus-io/codemirror-promql": "0.54.1", + "@prometheus-io/codemirror-promql": "0.55.0-rc.0", "bootstrap": "^4.6.2", "css.escape": "^1.5.1", "downshift": "^9.0.6", From 6fcd225aeed1fb42e39dd6805d4447f9a9119f3d Mon Sep 17 00:00:00 2001 From: beorn7 Date: Thu, 15 Aug 2024 14:19:16 +0200 Subject: [PATCH 025/137] promql(native histograms): Introduce exponential interpolation The linear interpolation (assuming that observations are uniformly distributed within a bucket) is a solid 
and simple assumption for lack of any other information. However, the
exponential bucketing used by standard schemas of native histograms has
been chosen to cover the whole range of observations in a way that
bucket populations are spread out over buckets in a reasonable way for
typical distributions encountered in real-world scenarios.

This is the origin of the idea implemented here: If we divide a given
bucket into two (or more) smaller exponential buckets, we "most
naturally" expect that the samples in the original buckets will split
among those smaller buckets in a more or less uniform fashion. With
this assumption, we end up with an "exponential interpolation", which
therefore appears to be a better match for histograms with exponential
bucketing.

This commit leaves the linear interpolation in place for NHCB, but
changes the interpolation for exponential native histograms to
exponential. This affects `histogram_quantile` and `histogram_fraction`
(because the latter is more or less the inverse of the former).

The zero bucket has to be treated specially because the assumption
above would lead to an "interpolation to zero" (the bucket density
approaches infinity around zero, and with the postulated uniform usage
of buckets, we would end up with an estimate of zero for all quantiles
ending up in the zero bucket). We simply fall back to linear
interpolation within the zero bucket.

At the same time, this commit makes the call to stick with the
assumption that the zero bucket only contains positive observations
for native histograms without negative buckets (and vice versa). (This
is an assumption relevant for interpolation. It is a mostly academic
point, as the zero bucket is supposed to be very small anyway. However,
in cases where it _is_ relevantly broad, the assumption helps a lot in
practice.)

This commit also updates and completes the documentation to match both
details about interpolation.
As a more high level note: The approach here attempts to strike a balance between a more simplistic approach without any assumption, and a more involved approach with more sophisticated assumptions. I will shortly describe both for reference: The "zero assumption" approach would be to not interpolate at all, but _always_ return the harmonic mean of the bucket boundaries of the bucket the quantile ends up in. This has the advantage of minimizing the maximum possible relative error of the quantile estimation. (Depending on the exact definition of the relative error of an estimation, there is also an argument to return the arithmetic mean of the bucket boundaries.) While limiting the maximum possible relative error is a good property, this approach would throw away the information if a quantile is closer to the upper or lower end of the population within a bucket. This can be valuable trending information in a dashboard. With any kind of interpolation, the maximum possible error of a quantile estimation increases to the full width of a bucket (i.e. it more than doubles for the harmonic mean approach, and precisely doubles for the arithmetic mean approach). However, in return the _expectation value_ of the error decreases. The increase of the theoretical maximum only has practical relevance for pathologic distributions. For example, if there are thousand observations within a bucket, they could _all_ be at the upper bound of the bucket. If the quantile calculation picks the 1st observation in the bucket as the relevant one, an interpolation will yield a value close to the lower bucket boundary, while the true quantile value is close to the upper boundary. The "fancy interpolation" approach would be one that analyses the _actual_ distribution of samples in the histogram. A lot of statistics could be applied based on the information we have available in the histogram. This would include the population of neighboring (or even all) buckets in the histogram. 
In general, the resolution of a native histogram should be quite high, and therefore, those "fancy" approaches would increase the computational cost quite a bit with very little practical benefits (i.e. just tiny corrections of the estimated quantile value). The results are also much harder to reason with. Signed-off-by: beorn7 --- docs/querying/functions.md | 85 ++++--- .../testdata/native_histograms.test | 232 ++++++++++++++---- promql/quantile.go | 109 ++++++-- 3 files changed, 337 insertions(+), 89 deletions(-) diff --git a/docs/querying/functions.md b/docs/querying/functions.md index e13628c5c5..ecbf9d26c2 100644 --- a/docs/querying/functions.md +++ b/docs/querying/functions.md @@ -326,45 +326,70 @@ With native histograms, aggregating everything works as usual without any `by` c histogram_quantile(0.9, sum(rate(http_request_duration_seconds[10m]))) -The `histogram_quantile()` function interpolates quantile values by -assuming a linear distribution within a bucket. +In the (common) case that a quantile value does not coincide with a bucket +boundary, the `histogram_quantile()` function interpolates the quantile value +within the bucket the quantile value falls into. For classic histograms, for +native histograms with custom bucket boundaries, and for the zero bucket of +other native histograms, it assumes a uniform distribution of observations +within the bucket (also called _linear interpolation_). For the +non-zero-buckets of native histograms with a standard exponential bucketing +schema, the interpolation is done under the assumption that the samples within +the bucket are distributed in a way that they would uniformly populate the +buckets in a hypothetical histogram with higher resolution. (This is also +called _exponential interpolation_.) If `b` has 0 observations, `NaN` is returned. For φ < 0, `-Inf` is returned. For φ > 1, `+Inf` is returned. For φ = `NaN`, `NaN` is returned. 
-The following is only relevant for classic histograms: If `b` contains -fewer than two buckets, `NaN` is returned. The highest bucket must have an -upper bound of `+Inf`. (Otherwise, `NaN` is returned.) If a quantile is located -in the highest bucket, the upper bound of the second highest bucket is -returned. A lower limit of the lowest bucket is assumed to be 0 if the upper -bound of that bucket is greater than -0. In that case, the usual linear interpolation is applied within that -bucket. Otherwise, the upper bound of the lowest bucket is returned for -quantiles located in the lowest bucket. +Special cases for classic histograms: -You can use `histogram_quantile(0, v instant-vector)` to get the estimated minimum value stored in -a histogram. +* If `b` contains fewer than two buckets, `NaN` is returned. +* The highest bucket must have an upper bound of `+Inf`. (Otherwise, `NaN` is + returned.) +* If a quantile is located in the highest bucket, the upper bound of the second + highest bucket is returned. +* The lower limit of the lowest bucket is assumed to be 0 if the upper bound of + that bucket is greater than 0. In that case, the usual linear interpolation + is applied within that bucket. Otherwise, the upper bound of the lowest + bucket is returned for quantiles located in the lowest bucket. -You can use `histogram_quantile(1, v instant-vector)` to get the estimated maximum value stored in -a histogram. +Special cases for native histograms (relevant for the exact interpolation +happening within the zero bucket): -Buckets of classic histograms are cumulative. Therefore, the following should always be the case: +* A zero bucket with finite width is assumed to contain no negative + observations if the histogram has observations in positive buckets, but none + in negative buckets. +* A zero bucket with finite width is assumed to contain no positive + observations if the histogram has observations in negative buckets, but none + in positive buckets. 
-* The counts in the buckets are monotonically increasing (strictly non-decreasing). -* A lack of observations between the upper limits of two consecutive buckets results in equal counts -in those two buckets. +You can use `histogram_quantile(0, v instant-vector)` to get the estimated +minimum value stored in a histogram. -However, floating point precision issues (e.g. small discrepancies introduced by computing of buckets -with `sum(rate(...))`) or invalid data might violate these assumptions. In that case, -`histogram_quantile` would be unable to return meaningful results. To mitigate the issue, -`histogram_quantile` assumes that tiny relative differences between consecutive buckets are happening -because of floating point precision errors and ignores them. (The threshold to ignore a difference -between two buckets is a trillionth (1e-12) of the sum of both buckets.) Furthermore, if there are -non-monotonic bucket counts even after this adjustment, they are increased to the value of the -previous buckets to enforce monotonicity. The latter is evidence for an actual issue with the input -data and is therefore flagged with an informational annotation reading `input to histogram_quantile -needed to be fixed for monotonicity`. If you encounter this annotation, you should find and remove -the source of the invalid data. +You can use `histogram_quantile(1, v instant-vector)` to get the estimated +maximum value stored in a histogram. + +Buckets of classic histograms are cumulative. Therefore, the following should +always be the case: + +* The counts in the buckets are monotonically increasing (strictly + non-decreasing). +* A lack of observations between the upper limits of two consecutive buckets + results in equal counts in those two buckets. + +However, floating point precision issues (e.g. small discrepancies introduced +by computing of buckets with `sum(rate(...))`) or invalid data might violate +these assumptions. 
In that case, `histogram_quantile` would be unable to return +meaningful results. To mitigate the issue, `histogram_quantile` assumes that +tiny relative differences between consecutive buckets are happening because of +floating point precision errors and ignores them. (The threshold to ignore a +difference between two buckets is a trillionth (1e-12) of the sum of both +buckets.) Furthermore, if there are non-monotonic bucket counts even after this +adjustment, they are increased to the value of the previous buckets to enforce +monotonicity. The latter is evidence for an actual issue with the input data +and is therefore flagged with an informational annotation reading `input to +histogram_quantile needed to be fixed for monotonicity`. If you encounter this +annotation, you should find and remove the source of the invalid data. ## `histogram_stddev()` and `histogram_stdvar()` diff --git a/promql/promqltest/testdata/native_histograms.test b/promql/promqltest/testdata/native_histograms.test index ee521f9c3a..ca4993660f 100644 --- a/promql/promqltest/testdata/native_histograms.test +++ b/promql/promqltest/testdata/native_histograms.test @@ -46,9 +46,12 @@ eval instant at 1m histogram_fraction(1, 2, single_histogram) eval instant at 1m histogram_fraction(0, 8, single_histogram) {} 1 -# Median is 1.5 due to linear estimation of the midpoint of the middle bucket, whose values are within range 1 < x <= 2. +# Median is 1.414213562373095 (2**2**-1, or sqrt(2)) due to +# exponential interpolation, i.e. the "midpoint" within range 1 < x <= +# 2 is assumed where the bucket boundary would be if we increased the +# resolution of the histogram by one step. eval instant at 1m histogram_quantile(0.5, single_histogram) - {} 1.5 + {} 1.414213562373095 clear @@ -68,8 +71,9 @@ eval instant at 5m histogram_avg(multi_histogram) eval instant at 5m histogram_fraction(1, 2, multi_histogram) {} 0.5 +# See explanation for exponential interpolation above. 
eval instant at 5m histogram_quantile(0.5, multi_histogram) - {} 1.5 + {} 1.414213562373095 # Each entry should look the same as the first. @@ -85,8 +89,9 @@ eval instant at 50m histogram_avg(multi_histogram) eval instant at 50m histogram_fraction(1, 2, multi_histogram) {} 0.5 +# See explanation for exponential interpolation above. eval instant at 50m histogram_quantile(0.5, multi_histogram) - {} 1.5 + {} 1.414213562373095 clear @@ -109,8 +114,9 @@ eval instant at 5m histogram_avg(incr_histogram) eval instant at 5m histogram_fraction(1, 2, incr_histogram) {} 0.6 +# See explanation for exponential interpolation above. eval instant at 5m histogram_quantile(0.5, incr_histogram) - {} 1.5 + {} 1.414213562373095 eval instant at 50m incr_histogram @@ -129,16 +135,18 @@ eval instant at 50m histogram_avg(incr_histogram) eval instant at 50m histogram_fraction(1, 2, incr_histogram) {} 0.8571428571428571 +# See explanation for exponential interpolation above. eval instant at 50m histogram_quantile(0.5, incr_histogram) - {} 1.5 + {} 1.414213562373095 # Per-second average rate of increase should be 1/(5*60) for count and buckets, then 2/(5*60) for sum. eval instant at 50m rate(incr_histogram[10m]) {} {{count:0.0033333333333333335 sum:0.006666666666666667 offset:1 buckets:[0.0033333333333333335]}} # Calculate the 50th percentile of observations over the last 10m. +# See explanation for exponential interpolation above. eval instant at 50m histogram_quantile(0.5, rate(incr_histogram[10m])) - {} 1.5 + {} 1.414213562373095 clear @@ -211,8 +219,9 @@ eval instant at 1m histogram_avg(negative_histogram) eval instant at 1m histogram_fraction(-2, -1, negative_histogram) {} 0.5 +# Exponential interpolation works the same as for positive buckets, just mirrored. 
eval instant at 1m histogram_quantile(0.5, negative_histogram) - {} -1.5 + {} -1.414213562373095 clear @@ -233,8 +242,9 @@ eval instant at 5m histogram_avg(two_samples_histogram) eval instant at 5m histogram_fraction(-2, -1, two_samples_histogram) {} 0.5 +# See explanation for exponential interpolation above. eval instant at 5m histogram_quantile(0.5, two_samples_histogram) - {} -1.5 + {} -1.414213562373095 clear @@ -392,20 +402,24 @@ eval_warn instant at 10m histogram_quantile(1.001, histogram_quantile_1) eval instant at 10m histogram_quantile(1, histogram_quantile_1) {} 16 +# The following quantiles are within a bucket. Exponential +# interpolation is applied (rather than linear, as it is done for +# classic histograms), leading to slightly different quantile values. eval instant at 10m histogram_quantile(0.99, histogram_quantile_1) - {} 15.759999999999998 + {} 15.67072476139083 eval instant at 10m histogram_quantile(0.9, histogram_quantile_1) - {} 13.600000000000001 + {} 12.99603834169977 eval instant at 10m histogram_quantile(0.6, histogram_quantile_1) - {} 4.799999999999997 + {} 4.594793419988138 eval instant at 10m histogram_quantile(0.5, histogram_quantile_1) - {} 1.6666666666666665 + {} 1.5874010519681994 +# Linear interpolation within the zero bucket after all. eval instant at 10m histogram_quantile(0.1, histogram_quantile_1) - {} 0.0006000000000000001 + {} 0.0006 eval instant at 10m histogram_quantile(0, histogram_quantile_1) {} 0 @@ -425,17 +439,20 @@ eval_warn instant at 10m histogram_quantile(1.001, histogram_quantile_2) eval instant at 10m histogram_quantile(1, histogram_quantile_2) {} 0 +# Again, the quantile values here are slightly different from what +# they would be with linear interpolation. Note that quantiles +# ending up in the zero bucket are linearly interpolated after all. 
eval instant at 10m histogram_quantile(0.99, histogram_quantile_2) - {} -6.000000000000048e-05 + {} -0.00006 eval instant at 10m histogram_quantile(0.9, histogram_quantile_2) - {} -0.0005999999999999996 + {} -0.0006 eval instant at 10m histogram_quantile(0.5, histogram_quantile_2) - {} -1.6666666666666667 + {} -1.5874010519681996 eval instant at 10m histogram_quantile(0.1, histogram_quantile_2) - {} -13.6 + {} -12.996038341699768 eval instant at 10m histogram_quantile(0, histogram_quantile_2) {} -16 @@ -445,7 +462,9 @@ eval_warn instant at 10m histogram_quantile(-1, histogram_quantile_2) clear -# Apply quantile function to histogram with both positive and negative buckets with zero bucket. +# Apply quantile function to histogram with both positive and negative +# buckets with zero bucket. +# First positive buckets with exponential interpolation. load 10m histogram_quantile_3 {{schema:0 count:24 sum:100 z_bucket:4 z_bucket_w:0.001 buckets:[2 3 0 1 4] n_buckets:[2 3 0 1 4]}}x1 @@ -456,31 +475,34 @@ eval instant at 10m histogram_quantile(1, histogram_quantile_3) {} 16 eval instant at 10m histogram_quantile(0.99, histogram_quantile_3) - {} 15.519999999999996 + {} 15.34822590920423 eval instant at 10m histogram_quantile(0.9, histogram_quantile_3) - {} 11.200000000000003 + {} 10.556063286183155 eval instant at 10m histogram_quantile(0.7, histogram_quantile_3) - {} 1.2666666666666657 + {} 1.2030250360821164 +# Linear interpolation in the zero bucket, symmetrically centered around +# the zero point. eval instant at 10m histogram_quantile(0.55, histogram_quantile_3) - {} 0.0006000000000000005 + {} 0.0006 eval instant at 10m histogram_quantile(0.5, histogram_quantile_3) {} 0 eval instant at 10m histogram_quantile(0.45, histogram_quantile_3) - {} -0.0005999999999999996 + {} -0.0006 +# Finally negative buckets with mirrored exponential interpolation. 
eval instant at 10m histogram_quantile(0.3, histogram_quantile_3) - {} -1.266666666666667 + {} -1.2030250360821169 eval instant at 10m histogram_quantile(0.1, histogram_quantile_3) - {} -11.2 + {} -10.556063286183155 eval instant at 10m histogram_quantile(0.01, histogram_quantile_3) - {} -15.52 + {} -15.34822590920423 eval instant at 10m histogram_quantile(0, histogram_quantile_3) {} -16 @@ -490,6 +512,90 @@ eval_warn instant at 10m histogram_quantile(-1, histogram_quantile_3) clear +# Try different schemas. (The interpolation logic must not depend on the schema.) +clear +load 1m + var_res_histogram{schema="-1"} {{schema:-1 sum:6 count:5 buckets:[0 5]}} + var_res_histogram{schema="0"} {{schema:0 sum:4 count:5 buckets:[0 5]}} + var_res_histogram{schema="+1"} {{schema:1 sum:4 count:5 buckets:[0 5]}} + +eval instant at 1m histogram_quantile(0.5, var_res_histogram) + {schema="-1"} 2.0 + {schema="0"} 1.4142135623730951 + {schema="+1"} 1.189207 + +eval instant at 1m histogram_fraction(0, 2, var_res_histogram{schema="-1"}) + {schema="-1"} 0.5 + +eval instant at 1m histogram_fraction(0, 1.4142135623730951, var_res_histogram{schema="0"}) + {schema="0"} 0.5 + +eval instant at 1m histogram_fraction(0, 1.189207, var_res_histogram{schema="+1"}) + {schema="+1"} 0.5 + +# The same as above, but one bucket "further to the right". 
+clear +load 1m + var_res_histogram{schema="-1"} {{schema:-1 sum:6 count:5 buckets:[0 0 5]}} + var_res_histogram{schema="0"} {{schema:0 sum:4 count:5 buckets:[0 0 5]}} + var_res_histogram{schema="+1"} {{schema:1 sum:4 count:5 buckets:[0 0 5]}} + +eval instant at 1m histogram_quantile(0.5, var_res_histogram) + {schema="-1"} 8.0 + {schema="0"} 2.82842712474619 + {schema="+1"} 1.6817928305074292 + +eval instant at 1m histogram_fraction(0, 8, var_res_histogram{schema="-1"}) + {schema="-1"} 0.5 + +eval instant at 1m histogram_fraction(0, 2.82842712474619, var_res_histogram{schema="0"}) + {schema="0"} 0.5 + +eval instant at 1m histogram_fraction(0, 1.6817928305074292, var_res_histogram{schema="+1"}) + {schema="+1"} 0.5 + +# And everything again but for negative buckets. +clear +load 1m + var_res_histogram{schema="-1"} {{schema:-1 sum:6 count:5 n_buckets:[0 5]}} + var_res_histogram{schema="0"} {{schema:0 sum:4 count:5 n_buckets:[0 5]}} + var_res_histogram{schema="+1"} {{schema:1 sum:4 count:5 n_buckets:[0 5]}} + +eval instant at 1m histogram_quantile(0.5, var_res_histogram) + {schema="-1"} -2.0 + {schema="0"} -1.4142135623730951 + {schema="+1"} -1.189207 + +eval instant at 1m histogram_fraction(-2, 0, var_res_histogram{schema="-1"}) + {schema="-1"} 0.5 + +eval instant at 1m histogram_fraction(-1.4142135623730951, 0, var_res_histogram{schema="0"}) + {schema="0"} 0.5 + +eval instant at 1m histogram_fraction(-1.189207, 0, var_res_histogram{schema="+1"}) + {schema="+1"} 0.5 + +clear +load 1m + var_res_histogram{schema="-1"} {{schema:-1 sum:6 count:5 n_buckets:[0 0 5]}} + var_res_histogram{schema="0"} {{schema:0 sum:4 count:5 n_buckets:[0 0 5]}} + var_res_histogram{schema="+1"} {{schema:1 sum:4 count:5 n_buckets:[0 0 5]}} + +eval instant at 1m histogram_quantile(0.5, var_res_histogram) + {schema="-1"} -8.0 + {schema="0"} -2.82842712474619 + {schema="+1"} -1.6817928305074292 + +eval instant at 1m histogram_fraction(-8, 0, var_res_histogram{schema="-1"}) + {schema="-1"} 0.5 + 
+eval instant at 1m histogram_fraction(-2.82842712474619, 0, var_res_histogram{schema="0"}) + {schema="0"} 0.5 + +eval instant at 1m histogram_fraction(-1.6817928305074292, 0, var_res_histogram{schema="+1"}) + {schema="+1"} 0.5 + + # Apply fraction function to empty histogram. load 10m histogram_fraction_1 {{}}x1 @@ -515,11 +621,18 @@ eval instant at 10m histogram_fraction(-0.001, 0, histogram_fraction_2) eval instant at 10m histogram_fraction(0, 0.001, histogram_fraction_2) {} 0.16666666666666666 +# Note that this result and the one above add up to 1. +eval instant at 10m histogram_fraction(0.001, inf, histogram_fraction_2) + {} 0.8333333333333334 + +# We are in the zero bucket, resulting in linear interpolation eval instant at 10m histogram_fraction(0, 0.0005, histogram_fraction_2) {} 0.08333333333333333 -eval instant at 10m histogram_fraction(0.001, inf, histogram_fraction_2) - {} 0.8333333333333334 +# Demonstrate that the inverse operation with histogram_quantile yields +# the original value with the non-trivial result above. +eval instant at 10m histogram_quantile(0.08333333333333333, histogram_fraction_2) + {} 0.0005 eval instant at 10m histogram_fraction(-inf, -0.001, histogram_fraction_2) {} 0 @@ -527,17 +640,30 @@ eval instant at 10m histogram_fraction(-inf, -0.001, histogram_fraction_2) eval instant at 10m histogram_fraction(1, 2, histogram_fraction_2) {} 0.25 +# More non-trivial results with interpolation involved below, including +# some round-trips via histogram_quantile to prove that the inverse +# operation leads to the same results. 
+ +eval instant at 10m histogram_fraction(0, 1.5, histogram_fraction_2) + {} 0.4795739585136224 + eval instant at 10m histogram_fraction(1.5, 2, histogram_fraction_2) - {} 0.125 + {} 0.10375937481971091 eval instant at 10m histogram_fraction(1, 8, histogram_fraction_2) {} 0.3333333333333333 +eval instant at 10m histogram_fraction(0, 6, histogram_fraction_2) + {} 0.6320802083934297 + +eval instant at 10m histogram_quantile(0.6320802083934297, histogram_fraction_2) + {} 6 + eval instant at 10m histogram_fraction(1, 6, histogram_fraction_2) - {} 0.2916666666666667 + {} 0.29874687506009634 eval instant at 10m histogram_fraction(1.5, 6, histogram_fraction_2) - {} 0.16666666666666666 + {} 0.15250624987980724 eval instant at 10m histogram_fraction(-2, -1, histogram_fraction_2) {} 0 @@ -600,6 +726,12 @@ eval instant at 10m histogram_fraction(0, 0.001, histogram_fraction_3) eval instant at 10m histogram_fraction(-0.0005, 0, histogram_fraction_3) {} 0.08333333333333333 +eval instant at 10m histogram_fraction(-inf, -0.0005, histogram_fraction_3) + {} 0.9166666666666666 + +eval instant at 10m histogram_quantile(0.9166666666666666, histogram_fraction_3) + {} -0.0005 + eval instant at 10m histogram_fraction(0.001, inf, histogram_fraction_3) {} 0 @@ -625,16 +757,22 @@ eval instant at 10m histogram_fraction(-2, -1, histogram_fraction_3) {} 0.25 eval instant at 10m histogram_fraction(-2, -1.5, histogram_fraction_3) - {} 0.125 + {} 0.10375937481971091 eval instant at 10m histogram_fraction(-8, -1, histogram_fraction_3) {} 0.3333333333333333 +eval instant at 10m histogram_fraction(-inf, -6, histogram_fraction_3) + {} 0.36791979160657035 + +eval instant at 10m histogram_quantile(0.36791979160657035, histogram_fraction_3) + {} -6 + eval instant at 10m histogram_fraction(-6, -1, histogram_fraction_3) - {} 0.2916666666666667 + {} 0.29874687506009634 eval instant at 10m histogram_fraction(-6, -1.5, histogram_fraction_3) - {} 0.16666666666666666 + {} 0.15250624987980724 eval instant at 10m 
histogram_fraction(42, 3.1415, histogram_fraction_3) {} 0 @@ -684,6 +822,18 @@ eval instant at 10m histogram_fraction(0, 0.001, histogram_fraction_4) eval instant at 10m histogram_fraction(-0.0005, 0.0005, histogram_fraction_4) {} 0.08333333333333333 +eval instant at 10m histogram_fraction(-inf, 0.0005, histogram_fraction_4) + {} 0.5416666666666666 + +eval instant at 10m histogram_quantile(0.5416666666666666, histogram_fraction_4) + {} 0.0005 + +eval instant at 10m histogram_fraction(-inf, -0.0005, histogram_fraction_4) + {} 0.4583333333333333 + +eval instant at 10m histogram_quantile(0.4583333333333333, histogram_fraction_4) + {} -0.0005 + eval instant at 10m histogram_fraction(0.001, inf, histogram_fraction_4) {} 0.4166666666666667 @@ -694,31 +844,31 @@ eval instant at 10m histogram_fraction(1, 2, histogram_fraction_4) {} 0.125 eval instant at 10m histogram_fraction(1.5, 2, histogram_fraction_4) - {} 0.0625 + {} 0.051879687409855414 eval instant at 10m histogram_fraction(1, 8, histogram_fraction_4) {} 0.16666666666666666 eval instant at 10m histogram_fraction(1, 6, histogram_fraction_4) - {} 0.14583333333333334 + {} 0.14937343753004825 eval instant at 10m histogram_fraction(1.5, 6, histogram_fraction_4) - {} 0.08333333333333333 + {} 0.07625312493990366 eval instant at 10m histogram_fraction(-2, -1, histogram_fraction_4) {} 0.125 eval instant at 10m histogram_fraction(-2, -1.5, histogram_fraction_4) - {} 0.0625 + {} 0.051879687409855456 eval instant at 10m histogram_fraction(-8, -1, histogram_fraction_4) {} 0.16666666666666666 eval instant at 10m histogram_fraction(-6, -1, histogram_fraction_4) - {} 0.14583333333333334 + {} 0.14937343753004817 eval instant at 10m histogram_fraction(-6, -1.5, histogram_fraction_4) - {} 0.08333333333333333 + {} 0.07625312493990362 eval instant at 10m histogram_fraction(42, 3.1415, histogram_fraction_4) {} 0 diff --git a/promql/quantile.go b/promql/quantile.go index 7ddb76acba..06775d3ae6 100644 --- a/promql/quantile.go +++ 
b/promql/quantile.go @@ -153,19 +153,31 @@ func bucketQuantile(q float64, buckets buckets) (float64, bool, bool) { // histogramQuantile calculates the quantile 'q' based on the given histogram. // -// The quantile value is interpolated assuming a linear distribution within a -// bucket. -// TODO(beorn7): Find an interpolation method that is a better fit for -// exponential buckets (and think about configurable interpolation). +// For custom buckets, the result is interpolated linearly, i.e. it is assumed +// the observations are uniformly distributed within each bucket. (This is a +// quite blunt assumption, but it is consistent with the interpolation method +// used for classic histograms so far.) +// +// For exponential buckets, the interpolation is done under the assumption that +// the samples within each bucket are distributed in a way that they would +// uniformly populate the buckets in a hypothetical histogram with higher +// resolution. For example, if the rank calculation suggests that the requested +// quantile is right in the middle of the population of the (1,2] bucket, we +// assume the quantile would be right at the bucket boundary between the two +// buckets the (1,2] bucket would be divided into if the histogram had double +// the resolution, which is 2**2**-1 = 1.4142... We call this exponential +// interpolation. +// +// However, for a quantile that ends up in the zero bucket, this method isn't +// very helpful (because there is an infinite number of buckets close to zero, +// so we would have to assume zero as the result). Therefore, we return to +// linear interpolation in the zero bucket. // // A natural lower bound of 0 is assumed if the histogram has only positive // buckets. Likewise, a natural upper bound of 0 is assumed if the histogram has // only negative buckets. -// TODO(beorn7): Come to terms if we want that. 
// -// There are a number of special cases (once we have a way to report errors -// happening during evaluations of AST functions, we should report those -// explicitly): +// There are a number of special cases: // // If the histogram has 0 observations, NaN is returned. // @@ -193,9 +205,9 @@ func histogramQuantile(q float64, h *histogram.FloatHistogram) float64 { rank float64 ) - // if there are NaN observations in the histogram (h.Sum is NaN), use the forward iterator - // if the q < 0.5, use the forward iterator - // if the q >= 0.5, use the reverse iterator + // If there are NaN observations in the histogram (h.Sum is NaN), use the forward iterator. + // If q < 0.5, use the forward iterator. + // If q >= 0.5, use the reverse iterator. if math.IsNaN(h.Sum) || q < 0.5 { it = h.AllBucketIterator() rank = q * h.Count @@ -260,8 +272,29 @@ func histogramQuantile(q float64, h *histogram.FloatHistogram) float64 { rank = count - rank } - // TODO(codesome): Use a better estimation than linear. - return bucket.Lower + (bucket.Upper-bucket.Lower)*(rank/bucket.Count) + // The fraction of how far we are into the current bucket. + fraction := rank / bucket.Count + + // Return linear interpolation for custom buckets and for quantiles that + // end up in the zero bucket. + if h.UsesCustomBuckets() || (bucket.Lower <= 0 && bucket.Upper >= 0) { + return bucket.Lower + (bucket.Upper-bucket.Lower)*fraction + } + + // For exponential buckets, we interpolate on a logarithmic scale. On a + // logarithmic scale, the exponential bucket boundaries (for any schema) + // become linear (every bucket has the same width). Therefore, after + // taking the logarithm of both bucket boundaries, we can use the + // calculated fraction in the same way as for linear interpolation (see + // above). Finally, we return to the normal scale by applying the + // exponential function to the result. 
+ logLower := math.Log2(math.Abs(bucket.Lower)) + logUpper := math.Log2(math.Abs(bucket.Upper)) + if bucket.Lower > 0 { // Positive bucket. + return math.Exp2(logLower + (logUpper-logLower)*fraction) + } + // Otherwise, we are in a negative bucket and have to mirror things. + return -math.Exp2(logUpper + (logLower-logUpper)*(1-fraction)) } // histogramFraction calculates the fraction of observations between the @@ -271,8 +304,8 @@ func histogramQuantile(q float64, h *histogram.FloatHistogram) float64 { // histogramQuantile(0.9, h) returns 123.4, then histogramFraction(-Inf, 123.4, h) // returns 0.9. // -// The same notes (and TODOs) with regard to interpolation and assumptions about -// the zero bucket boundaries apply as for histogramQuantile. +// The same notes with regard to interpolation and assumptions about the zero +// bucket boundaries apply as for histogramQuantile. // // Whether either boundary is inclusive or exclusive doesn’t actually matter as // long as interpolation has to be performed anyway. In the case of a boundary @@ -310,7 +343,35 @@ func histogramFraction(lower, upper float64, h *histogram.FloatHistogram) float6 ) for it.Next() { b := it.At() - if b.Lower < 0 && b.Upper > 0 { + zeroBucket := false + + // interpolateLinearly is used for custom buckets to be + // consistent with the linear interpolation known from classic + // histograms. It is also used for the zero bucket. + interpolateLinearly := func(v float64) float64 { + return rank + b.Count*(v-b.Lower)/(b.Upper-b.Lower) + } + + // interpolateExponentially is using the same exponential + // interpolation method as above for histogramQuantile. This + // method is a better fit for exponential bucketing. 
+ interpolateExponentially := func(v float64) float64 { + var ( + logLower = math.Log2(math.Abs(b.Lower)) + logUpper = math.Log2(math.Abs(b.Upper)) + logV = math.Log2(math.Abs(v)) + fraction float64 + ) + if v > 0 { + fraction = (logV - logLower) / (logUpper - logLower) + } else { + fraction = 1 - ((logV - logUpper) / (logLower - logUpper)) + } + return rank + b.Count*fraction + } + + if b.Lower <= 0 && b.Upper >= 0 { + zeroBucket = true switch { case len(h.NegativeBuckets) == 0 && len(h.PositiveBuckets) > 0: // This is the zero bucket and the histogram has only @@ -325,10 +386,12 @@ func histogramFraction(lower, upper float64, h *histogram.FloatHistogram) float6 } } if !lowerSet && b.Lower >= lower { + // We have hit the lower value at the lower bucket boundary. lowerRank = rank lowerSet = true } if !upperSet && b.Lower >= upper { + // We have hit the upper value at the lower bucket boundary. upperRank = rank upperSet = true } @@ -336,11 +399,21 @@ func histogramFraction(lower, upper float64, h *histogram.FloatHistogram) float6 break } if !lowerSet && b.Lower < lower && b.Upper > lower { - lowerRank = rank + b.Count*(lower-b.Lower)/(b.Upper-b.Lower) + // The lower value is in this bucket. + if h.UsesCustomBuckets() || zeroBucket { + lowerRank = interpolateLinearly(lower) + } else { + lowerRank = interpolateExponentially(lower) + } lowerSet = true } if !upperSet && b.Lower < upper && b.Upper > upper { - upperRank = rank + b.Count*(upper-b.Lower)/(b.Upper-b.Lower) + // The upper value is in this bucket. 
+ if h.UsesCustomBuckets() || zeroBucket { + upperRank = interpolateLinearly(upper) + } else { + upperRank = interpolateExponentially(upper) + } upperSet = true } if lowerSet && upperSet { From 1639450172d060d467c429ab953f7deb3d37edbd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bj=C3=B6rn=20Rabenstein?= Date: Tue, 10 Sep 2024 15:23:00 +0200 Subject: [PATCH 026/137] Merge pull request #14821 from charleskorn/nh-negative-multiplication-division promql: correctly handle unary negation of native histograms and add tests for multiplication and division of native histograms by negative scalars Signed-off-by: Bryan Boreham --- model/histogram/float_histogram_test.go | 96 +++++++++++ promql/engine.go | 3 + promql/parser/generated_parser.y | 8 +- promql/parser/generated_parser.y.go | 156 +++++++++--------- promql/parser/lex.go | 3 + promql/parser/parse_test.go | 24 ++- .../testdata/native_histograms.test | 26 ++- 7 files changed, 223 insertions(+), 93 deletions(-) diff --git a/model/histogram/float_histogram_test.go b/model/histogram/float_histogram_test.go index 1558a6d679..cf370a313e 100644 --- a/model/histogram/float_histogram_test.go +++ b/model/histogram/float_histogram_test.go @@ -131,6 +131,54 @@ func TestFloatHistogramMul(t *testing.T) { NegativeBuckets: []float64{9, 3, 15, 18}, }, }, + { + "negation", + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 11, + Count: 30, + Sum: 23, + PositiveSpans: []Span{{-2, 2}, {1, 3}}, + PositiveBuckets: []float64{1, 0, 3, 4, 7}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{3, 1, 5, 6}, + }, + -1, + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: -11, + Count: -30, + Sum: -23, + PositiveSpans: []Span{{-2, 2}, {1, 3}}, + PositiveBuckets: []float64{-1, 0, -3, -4, -7}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{-3, -1, -5, -6}, + }, + }, + { + "negative multiplier", + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 11, + Count: 30, + Sum: 23, + PositiveSpans: []Span{{-2, 
2}, {1, 3}}, + PositiveBuckets: []float64{1, 0, 3, 4, 7}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{3, 1, 5, 6}, + }, + -2, + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: -22, + Count: -60, + Sum: -46, + PositiveSpans: []Span{{-2, 2}, {1, 3}}, + PositiveBuckets: []float64{-2, 0, -6, -8, -14}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{-6, -2, -10, -12}, + }, + }, { "no-op with custom buckets", &FloatHistogram{ @@ -409,6 +457,54 @@ func TestFloatHistogramDiv(t *testing.T) { NegativeBuckets: []float64{1.5, 0.5, 2.5, 3}, }, }, + { + "negation", + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 5.5, + Count: 3493.3, + Sum: 2349209.324, + PositiveSpans: []Span{{-2, 1}, {2, 3}}, + PositiveBuckets: []float64{1, 3.3, 4.2, 0.1}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{3.1, 3, 1.234e5, 1000}, + }, + -1, + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: -5.5, + Count: -3493.3, + Sum: -2349209.324, + PositiveSpans: []Span{{-2, 1}, {2, 3}}, + PositiveBuckets: []float64{-1, -3.3, -4.2, -0.1}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{-3.1, -3, -1.234e5, -1000}, + }, + }, + { + "negative half", + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 11, + Count: 30, + Sum: 23, + PositiveSpans: []Span{{-2, 2}, {1, 3}}, + PositiveBuckets: []float64{1, 0, 3, 4, 7}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{3, 1, 5, 6}, + }, + -2, + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: -5.5, + Count: -15, + Sum: -11.5, + PositiveSpans: []Span{{-2, 2}, {1, 3}}, + PositiveBuckets: []float64{-0.5, 0, -1.5, -2, -3.5}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{-1.5, -0.5, -2.5, -3}, + }, + }, { "no-op with custom buckets", &FloatHistogram{ diff --git a/promql/engine.go b/promql/engine.go index e55f154d23..b583e12d57 100644 --- a/promql/engine.go +++ b/promql/engine.go @@ -1815,6 +1815,9 @@ func (ev 
*evaluator) eval(ctx context.Context, expr parser.Expr) (parser.Value, for j := range mat[i].Floats { mat[i].Floats[j].F = -mat[i].Floats[j].F } + for j := range mat[i].Histograms { + mat[i].Histograms[j].H = mat[i].Histograms[j].H.Copy().Mul(-1) + } } if !ev.enableDelayedNameRemoval && mat.ContainsSameLabelset() { ev.errorf("vector cannot contain metrics with the same labelset") diff --git a/promql/parser/generated_parser.y b/promql/parser/generated_parser.y index da24be0c44..befb9bdf3e 100644 --- a/promql/parser/generated_parser.y +++ b/promql/parser/generated_parser.y @@ -818,12 +818,12 @@ histogram_desc_item $$ = yylex.(*parser).newMap() $$["sum"] = $3 } - | COUNT_DESC COLON number + | COUNT_DESC COLON signed_or_unsigned_number { $$ = yylex.(*parser).newMap() $$["count"] = $3 } - | ZERO_BUCKET_DESC COLON number + | ZERO_BUCKET_DESC COLON signed_or_unsigned_number { $$ = yylex.(*parser).newMap() $$["z_bucket"] = $3 @@ -875,11 +875,11 @@ bucket_set : LEFT_BRACKET bucket_set_list SPACE RIGHT_BRACKET } ; -bucket_set_list : bucket_set_list SPACE number +bucket_set_list : bucket_set_list SPACE signed_or_unsigned_number { $$ = append($1, $3) } - | number + | signed_or_unsigned_number { $$ = []float64{$1} } diff --git a/promql/parser/generated_parser.y.go b/promql/parser/generated_parser.y.go index 22231f73e2..ad58a52976 100644 --- a/promql/parser/generated_parser.y.go +++ b/promql/parser/generated_parser.y.go @@ -410,55 +410,55 @@ const yyPrivate = 57344 const yyLast = 799 var yyAct = [...]int16{ - 155, 334, 332, 276, 339, 152, 226, 39, 192, 44, - 291, 290, 156, 118, 82, 178, 229, 107, 106, 346, - 347, 348, 349, 109, 108, 198, 239, 199, 133, 110, - 105, 60, 245, 121, 6, 329, 325, 111, 328, 228, - 200, 201, 160, 119, 304, 267, 293, 128, 260, 160, - 151, 261, 159, 302, 358, 311, 122, 55, 89, 159, - 196, 241, 242, 259, 113, 243, 114, 54, 98, 99, - 302, 112, 101, 256, 104, 88, 230, 232, 234, 235, + 152, 334, 332, 155, 339, 226, 39, 192, 276, 44, + 291, 290, 118, 82, 178, 
229, 107, 106, 346, 347, + 348, 349, 109, 108, 198, 239, 199, 156, 110, 105, + 6, 245, 200, 201, 133, 325, 111, 329, 228, 60, + 357, 293, 328, 304, 267, 160, 266, 128, 55, 151, + 302, 311, 302, 196, 340, 159, 55, 89, 54, 356, + 241, 242, 355, 113, 243, 114, 54, 98, 99, 265, + 112, 101, 256, 104, 88, 230, 232, 234, 235, 236, + 244, 246, 249, 250, 251, 252, 253, 257, 258, 105, + 333, 231, 233, 237, 238, 240, 247, 248, 103, 115, + 109, 254, 255, 324, 150, 218, 110, 264, 111, 270, + 77, 35, 7, 149, 188, 163, 322, 321, 173, 320, + 167, 170, 323, 165, 271, 166, 2, 3, 4, 5, + 263, 101, 194, 104, 180, 184, 197, 187, 186, 319, + 272, 202, 203, 204, 205, 206, 207, 208, 209, 210, + 211, 212, 213, 214, 215, 216, 195, 299, 103, 318, + 217, 36, 298, 1, 190, 219, 220, 317, 160, 160, + 316, 193, 160, 154, 182, 196, 229, 297, 159, 159, + 160, 358, 159, 268, 181, 183, 239, 260, 296, 262, + 159, 315, 245, 129, 314, 55, 225, 313, 161, 228, + 161, 161, 259, 312, 161, 54, 86, 295, 310, 288, + 289, 8, 161, 292, 162, 37, 162, 162, 49, 269, + 162, 241, 242, 309, 179, 243, 180, 127, 162, 126, + 308, 223, 294, 256, 48, 222, 230, 232, 234, 235, 236, 244, 246, 249, 250, 251, 252, 253, 257, 258, - 160, 115, 231, 233, 237, 238, 240, 247, 248, 103, - 159, 109, 254, 255, 324, 150, 357, 110, 333, 218, - 111, 340, 310, 149, 77, 163, 7, 105, 35, 173, - 167, 170, 161, 323, 165, 356, 166, 309, 355, 194, - 2, 3, 4, 5, 308, 322, 184, 197, 162, 186, - 321, 195, 202, 203, 204, 205, 206, 207, 208, 209, - 210, 211, 212, 213, 214, 215, 216, 229, 129, 101, - 217, 104, 219, 220, 190, 266, 270, 239, 160, 121, - 268, 193, 264, 245, 55, 196, 154, 225, 159, 119, - 228, 271, 188, 160, 54, 161, 103, 117, 265, 84, - 262, 299, 122, 159, 320, 263, 298, 272, 10, 83, - 161, 162, 241, 242, 269, 187, 243, 185, 79, 288, - 289, 297, 319, 292, 256, 161, 162, 230, 232, 234, - 235, 236, 244, 246, 249, 250, 251, 252, 253, 257, - 258, 162, 294, 231, 233, 237, 238, 240, 247, 248, - 318, 317, 316, 254, 255, 180, 315, 134, 135, 136, 
- 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, - 147, 148, 157, 158, 169, 105, 314, 296, 300, 301, - 303, 223, 305, 313, 55, 222, 179, 168, 180, 84, - 306, 307, 177, 125, 54, 182, 295, 176, 124, 83, - 221, 312, 87, 89, 8, 181, 183, 81, 37, 86, - 175, 123, 36, 98, 99, 326, 327, 101, 102, 104, - 88, 127, 331, 126, 50, 336, 337, 338, 182, 335, - 78, 1, 342, 341, 344, 343, 49, 48, 181, 183, - 350, 351, 47, 55, 103, 352, 53, 77, 164, 56, - 46, 354, 22, 54, 59, 55, 172, 9, 9, 57, - 132, 45, 43, 130, 171, 54, 359, 42, 131, 41, - 40, 51, 191, 353, 273, 75, 85, 189, 224, 80, - 345, 18, 19, 120, 153, 20, 58, 227, 52, 116, + 221, 169, 231, 233, 237, 238, 240, 247, 248, 157, + 158, 164, 254, 255, 168, 10, 182, 300, 55, 301, + 303, 47, 305, 46, 132, 79, 181, 183, 54, 306, + 307, 45, 134, 135, 136, 137, 138, 139, 140, 141, + 142, 143, 144, 145, 146, 147, 148, 43, 59, 50, + 84, 9, 9, 121, 326, 78, 327, 130, 171, 121, + 83, 42, 131, 119, 335, 336, 337, 331, 185, 119, + 338, 261, 342, 341, 344, 343, 122, 117, 41, 177, + 350, 351, 122, 55, 176, 352, 53, 77, 40, 56, + 125, 354, 22, 54, 84, 124, 172, 175, 51, 57, + 191, 353, 273, 85, 83, 189, 359, 224, 123, 80, + 345, 120, 81, 153, 58, 75, 227, 52, 116, 0, + 0, 18, 19, 0, 0, 20, 0, 0, 0, 0, 0, 76, 0, 0, 0, 0, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 0, 0, 0, 13, 0, 0, 0, 24, 0, 30, - 0, 0, 31, 32, 55, 38, 0, 53, 77, 0, + 0, 0, 31, 32, 55, 38, 105, 53, 77, 0, 56, 275, 0, 22, 54, 0, 0, 0, 274, 0, 57, 0, 278, 279, 277, 284, 286, 283, 285, 280, - 281, 282, 287, 0, 0, 0, 75, 0, 0, 0, - 0, 0, 18, 19, 0, 0, 20, 0, 0, 0, - 0, 0, 76, 0, 0, 0, 0, 61, 62, 63, + 281, 282, 287, 87, 89, 0, 75, 0, 0, 0, + 0, 0, 18, 19, 98, 99, 20, 0, 101, 102, + 104, 88, 76, 0, 0, 0, 0, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, - 74, 0, 0, 0, 13, 0, 0, 0, 24, 0, + 74, 0, 0, 0, 13, 103, 0, 0, 24, 0, 30, 0, 55, 31, 32, 53, 77, 0, 56, 330, 0, 22, 54, 0, 0, 0, 0, 0, 57, 0, 278, 279, 277, 284, 286, 283, 285, 280, 281, 282, @@ -493,51 +493,51 
@@ var yyAct = [...]int16{ } var yyPact = [...]int16{ - 32, 106, 569, 569, 405, 526, -1000, -1000, -1000, 105, + 28, 102, 569, 569, 405, 526, -1000, -1000, -1000, 98, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, 277, -1000, 297, -1000, 650, + -1000, -1000, -1000, -1000, -1000, 342, -1000, 204, -1000, 650, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, 22, 95, -1000, -1000, 483, -1000, 483, 101, + -1000, -1000, 21, 93, -1000, -1000, 483, -1000, 483, 97, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, -1000, -1000, 167, -1000, -1000, - 281, -1000, -1000, 309, -1000, 23, -1000, -50, -50, -50, - -50, -50, -50, -50, -50, -50, -50, -50, -50, -50, - -50, -50, -50, 48, 174, 336, 95, -56, -1000, 262, - 262, 324, -1000, 631, 103, -1000, 280, -1000, -1000, 274, - 241, -1000, -1000, -1000, 187, -1000, 180, -1000, 159, 483, - -1000, -57, -40, -1000, 483, 483, 483, 483, 483, 483, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, 307, -1000, -1000, + 338, -1000, -1000, 225, -1000, 23, -1000, -44, -44, -44, + -44, -44, -44, -44, -44, -44, -44, -44, -44, -44, + -44, -44, -44, 47, 171, 259, 93, -57, -1000, 249, + 249, 324, -1000, 631, 75, -1000, 327, -1000, -1000, 222, + 130, -1000, -1000, -1000, 298, -1000, 112, -1000, 159, 483, + -1000, -58, -48, -1000, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, -1000, - 165, -1000, -1000, 94, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, 40, 40, 269, -1000, -1000, -1000, -1000, 155, -1000, - -1000, 41, -1000, 650, -1000, -1000, 31, -1000, 170, -1000, - -1000, -1000, -1000, -1000, 163, -1000, -1000, -1000, -1000, -1000, - 19, 144, 140, -1000, -1000, -1000, 404, 16, 262, 262, - 262, 262, 103, 103, 251, 251, 251, 715, 696, 251, - 251, 715, 103, 103, 251, 103, 16, -1000, 24, -1000, - 
-1000, -1000, 265, -1000, 189, -1000, -1000, -1000, -1000, -1000, + 39, -1000, -1000, 90, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, 36, 36, 229, -1000, -1000, -1000, -1000, 174, -1000, + -1000, 180, -1000, 650, -1000, -1000, 301, -1000, 105, -1000, + -1000, -1000, -1000, -1000, 44, -1000, -1000, -1000, -1000, -1000, + 18, 157, 83, -1000, -1000, -1000, 404, 15, 249, 249, + 249, 249, 75, 75, 402, 402, 402, 715, 696, 402, + 402, 715, 75, 75, 402, 75, 15, -1000, 19, -1000, + -1000, -1000, 186, -1000, 155, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - 483, -1000, -1000, -1000, -1000, -1000, -1000, 34, 34, 18, - 34, 44, 44, 110, 38, -1000, -1000, 285, 267, 260, - 240, 236, 235, 234, 206, 188, 134, 129, -1000, -1000, - -1000, -1000, -1000, -1000, 102, -1000, -1000, -1000, 14, -1000, - 650, -1000, -1000, -1000, 34, -1000, 12, 9, 482, -1000, - -1000, -1000, 51, 81, 40, 40, 40, 97, 97, 51, - 97, 51, -73, -1000, -1000, -1000, -1000, -1000, 34, 34, - -1000, -1000, -1000, 34, -1000, -1000, -1000, -1000, -1000, -1000, - 40, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, 104, -1000, 33, -1000, -1000, -1000, -1000, + 483, -1000, -1000, -1000, -1000, -1000, -1000, 31, 31, 17, + 31, 37, 37, 206, 34, -1000, -1000, 197, 191, 188, + 185, 164, 161, 153, 133, 113, 111, 110, -1000, -1000, + -1000, -1000, -1000, -1000, 101, -1000, -1000, -1000, 13, -1000, + 650, -1000, -1000, -1000, 31, -1000, 16, 11, 482, -1000, + -1000, -1000, 33, 163, 163, 163, 36, 40, 40, 33, + 40, 33, -74, -1000, -1000, -1000, -1000, -1000, 31, 31, + -1000, -1000, -1000, 31, -1000, -1000, -1000, -1000, -1000, -1000, + 163, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, 38, -1000, 160, -1000, -1000, -1000, -1000, } var yyPgo = 
[...]int16{ - 0, 379, 13, 378, 6, 15, 377, 344, 376, 374, - 373, 370, 198, 294, 369, 14, 368, 10, 11, 367, - 366, 8, 364, 3, 4, 363, 2, 1, 0, 362, - 12, 5, 361, 360, 18, 158, 359, 358, 7, 357, - 354, 17, 353, 31, 352, 9, 351, 350, 340, 332, - 327, 326, 314, 321, 302, + 0, 368, 12, 367, 5, 14, 366, 298, 364, 363, + 361, 360, 265, 211, 359, 13, 357, 10, 11, 355, + 353, 7, 352, 8, 4, 351, 2, 1, 3, 350, + 27, 0, 348, 338, 17, 193, 328, 312, 6, 311, + 308, 16, 307, 39, 297, 9, 281, 274, 273, 271, + 234, 218, 299, 163, 161, } var yyR1 = [...]int8{ @@ -630,9 +630,9 @@ var yyChk = [...]int16{ -38, -27, 19, -27, 26, -27, -21, -21, 24, 17, 2, 17, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 21, 2, 22, -4, -27, 26, 26, - 17, -23, -26, 57, -27, -31, -28, -28, -28, -24, + 17, -23, -26, 57, -27, -31, -31, -31, -28, -24, 14, -24, -26, -24, -26, -11, 92, 93, 94, 95, - -27, -27, -27, -25, -28, 24, 21, 2, 21, -28, + -27, -27, -27, -25, -31, 24, 21, 2, 21, -31, } var yyDef = [...]int16{ diff --git a/promql/parser/lex.go b/promql/parser/lex.go index d031e83307..82bf0367b8 100644 --- a/promql/parser/lex.go +++ b/promql/parser/lex.go @@ -610,6 +610,9 @@ func lexBuckets(l *Lexer) stateFn { case isSpace(r): l.emit(SPACE) return lexSpace + case r == '-': + l.emit(SUB) + return lexNumber case isDigit(r): l.backup() return lexNumber diff --git a/promql/parser/parse_test.go b/promql/parser/parse_test.go index 37748323ce..d9956e7452 100644 --- a/promql/parser/parse_test.go +++ b/promql/parser/parse_test.go @@ -4084,17 +4084,17 @@ func TestParseHistogramSeries(t *testing.T) { }, { name: "all properties used", - input: `{} {{schema:1 sum:-0.3 count:3.1 z_bucket:7.1 z_bucket_w:0.05 buckets:[5.1 10 7] offset:-3 n_buckets:[4.1 5] n_offset:-5 counter_reset_hint:gauge}}`, + input: `{} {{schema:1 sum:0.3 count:3.1 z_bucket:7.1 z_bucket_w:0.05 buckets:[5.1 10 7] offset:3 n_buckets:[4.1 5] n_offset:5 counter_reset_hint:gauge}}`, expected: []histogram.FloatHistogram{{ Schema: 1, - Sum: -0.3, + Sum: 0.3, Count: 3.1, 
ZeroCount: 7.1, ZeroThreshold: 0.05, PositiveBuckets: []float64{5.1, 10, 7}, - PositiveSpans: []histogram.Span{{Offset: -3, Length: 3}}, + PositiveSpans: []histogram.Span{{Offset: 3, Length: 3}}, NegativeBuckets: []float64{4.1, 5}, - NegativeSpans: []histogram.Span{{Offset: -5, Length: 2}}, + NegativeSpans: []histogram.Span{{Offset: 5, Length: 2}}, CounterResetHint: histogram.GaugeType, }}, }, @@ -4114,6 +4114,22 @@ func TestParseHistogramSeries(t *testing.T) { CounterResetHint: histogram.GaugeType, }}, }, + { + name: "all properties used, with negative values where supported", + input: `{} {{schema:1 sum:-0.3 count:-3.1 z_bucket:-7.1 z_bucket_w:0.05 buckets:[-5.1 -10 -7] offset:-3 n_buckets:[-4.1 -5] n_offset:-5 counter_reset_hint:gauge}}`, + expected: []histogram.FloatHistogram{{ + Schema: 1, + Sum: -0.3, + Count: -3.1, + ZeroCount: -7.1, + ZeroThreshold: 0.05, + PositiveBuckets: []float64{-5.1, -10, -7}, + PositiveSpans: []histogram.Span{{Offset: -3, Length: 3}}, + NegativeBuckets: []float64{-4.1, -5}, + NegativeSpans: []histogram.Span{{Offset: -5, Length: 2}}, + CounterResetHint: histogram.GaugeType, + }}, + }, { name: "static series", input: `{} {{buckets:[5 10 7] schema:1}}x2`, diff --git a/promql/promqltest/testdata/native_histograms.test b/promql/promqltest/testdata/native_histograms.test index 71e102dcee..7d2eec32cf 100644 --- a/promql/promqltest/testdata/native_histograms.test +++ b/promql/promqltest/testdata/native_histograms.test @@ -720,27 +720,39 @@ eval instant at 10m histogram_sum(scalar(histogram_fraction(-Inf, +Inf, sum(hist # Apply multiplication and division operator to histogram. 
load 10m - histogram_mul_div {{schema:0 count:21 sum:33 z_bucket:3 z_bucket_w:0.001 buckets:[3 3 3] n_buckets:[3 3 3]}}x1 + histogram_mul_div {{schema:0 count:30 sum:33 z_bucket:3 z_bucket_w:0.001 buckets:[3 3 3] n_buckets:[6 6 6]}}x1 float_series_3 3+0x1 float_series_0 0+0x1 eval instant at 10m histogram_mul_div*3 - {} {{schema:0 count:63 sum:99 z_bucket:9 z_bucket_w:0.001 buckets:[9 9 9] n_buckets:[9 9 9]}} + {} {{schema:0 count:90 sum:99 z_bucket:9 z_bucket_w:0.001 buckets:[9 9 9] n_buckets:[18 18 18]}} + +eval instant at 10m histogram_mul_div*-1 + {} {{schema:0 count:-30 sum:-33 z_bucket:-3 z_bucket_w:0.001 buckets:[-3 -3 -3] n_buckets:[-6 -6 -6]}} + +eval instant at 10m -histogram_mul_div + {} {{schema:0 count:-30 sum:-33 z_bucket:-3 z_bucket_w:0.001 buckets:[-3 -3 -3] n_buckets:[-6 -6 -6]}} + +eval instant at 10m histogram_mul_div*-3 + {} {{schema:0 count:-90 sum:-99 z_bucket:-9 z_bucket_w:0.001 buckets:[-9 -9 -9] n_buckets:[-18 -18 -18]}} eval instant at 10m 3*histogram_mul_div - {} {{schema:0 count:63 sum:99 z_bucket:9 z_bucket_w:0.001 buckets:[9 9 9] n_buckets:[9 9 9]}} + {} {{schema:0 count:90 sum:99 z_bucket:9 z_bucket_w:0.001 buckets:[9 9 9] n_buckets:[18 18 18]}} eval instant at 10m histogram_mul_div*float_series_3 - {} {{schema:0 count:63 sum:99 z_bucket:9 z_bucket_w:0.001 buckets:[9 9 9] n_buckets:[9 9 9]}} + {} {{schema:0 count:90 sum:99 z_bucket:9 z_bucket_w:0.001 buckets:[9 9 9] n_buckets:[18 18 18]}} eval instant at 10m float_series_3*histogram_mul_div - {} {{schema:0 count:63 sum:99 z_bucket:9 z_bucket_w:0.001 buckets:[9 9 9] n_buckets:[9 9 9]}} + {} {{schema:0 count:90 sum:99 z_bucket:9 z_bucket_w:0.001 buckets:[9 9 9] n_buckets:[18 18 18]}} eval instant at 10m histogram_mul_div/3 - {} {{schema:0 count:7 sum:11 z_bucket:1 z_bucket_w:0.001 buckets:[1 1 1] n_buckets:[1 1 1]}} + {} {{schema:0 count:10 sum:11 z_bucket:1 z_bucket_w:0.001 buckets:[1 1 1] n_buckets:[2 2 2]}} + +eval instant at 10m histogram_mul_div/-3 + {} {{schema:0 count:-10 sum:-11 
z_bucket:-1 z_bucket_w:0.001 buckets:[-1 -1 -1] n_buckets:[-2 -2 -2]}} eval instant at 10m histogram_mul_div/float_series_3 - {} {{schema:0 count:7 sum:11 z_bucket:1 z_bucket_w:0.001 buckets:[1 1 1] n_buckets:[1 1 1]}} + {} {{schema:0 count:10 sum:11 z_bucket:1 z_bucket_w:0.001 buckets:[1 1 1] n_buckets:[2 2 2]}} eval instant at 10m histogram_mul_div*0 {} {{schema:0 count:0 sum:0 z_bucket:0 z_bucket_w:0.001 buckets:[0 0 0] n_buckets:[0 0 0]}} From 06022a65099f40e915484b9ce2499ba02e1fcbac Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Thu, 19 Sep 2024 14:03:54 +0100 Subject: [PATCH 027/137] CHANGELOG: Add #14821 Signed-off-by: Bryan Boreham --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index e183f894b0..65642d37c7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -32,7 +32,7 @@ * [BUGFIX] Scraping: exemplars could be dropped in protobuf scraping. #14810 * [BUGFIX] Remote-Write: fix metadata sending for experimental Remote-Write V2. #14766 * [BUGFIX] Remote-Write: Return 4xx not 5xx when timeseries has duplicate label. #14716 -* [BUGFIX] Experimental Native Histograms: many fixes for incorrect results, panics, warnings. #14598, #14611, #14609, #14575, #14513 +* [BUGFIX] Experimental Native Histograms: many fixes for incorrect results, panics, warnings. #14598, #14611, #14609, #14575, #14513, #14821 * [BUGFIX] TSDB: Only count unknown record types in `record_decode_failures_total` metric. #14042 * [ENHANCEMENT] Remote-Write: Don't reshard if we haven't successfully sent a sample since last update. 
#14450 From 96e5a94d29707d2268feef7bd1c944ed53fd393e Mon Sep 17 00:00:00 2001 From: Jan Fajerski Date: Wed, 18 Sep 2024 11:20:17 +0200 Subject: [PATCH 028/137] promql: rename holt_winters to double_exponential_smoothing Signed-off-by: Jan Fajerski --- docs/querying/functions.md | 11 +- promql/bench_test.go | 2 +- promql/functions.go | 154 +++++++++--------- promql/parser/functions.go | 4 +- promql/promqltest/testdata/functions.test | 6 +- ui-commits | 12 ++ web/ui/mantine-ui/src/promql/functionDocs.tsx | 6 +- web/ui/mantine-ui/src/promql/functionMeta.ts | 4 +- .../src/promql/functionSignatures.ts | 4 +- .../src/complete/promql.terms.ts | 2 +- .../codemirror-promql/src/types/function.ts | 6 +- web/ui/module/lezer-promql/src/highlight.js | 2 +- web/ui/module/lezer-promql/src/promql.grammar | 4 +- 13 files changed, 117 insertions(+), 100 deletions(-) create mode 100644 ui-commits diff --git a/docs/querying/functions.md b/docs/querying/functions.md index 54d0d05ddc..9d73b53442 100644 --- a/docs/querying/functions.md +++ b/docs/querying/functions.md @@ -380,17 +380,22 @@ do not show up in the returned vector. Similarly, `histogram_stdvar(v instant-vector)` returns the estimated standard variance of observations in a native histogram. -## `holt_winters()` +## `double_exponential_smoothing()` **This function has to be enabled via the [feature flag](../feature_flags.md#experimental-promql-functions) `--enable-feature=promql-experimental-functions`.** -`holt_winters(v range-vector, sf scalar, tf scalar)` produces a smoothed value +`double_exponential_smoothing(v range-vector, sf scalar, tf scalar)` produces a smoothed value for time series based on the range in `v`. The lower the smoothing factor `sf`, the more importance is given to old data. The higher the trend factor `tf`, the more trends in the data is considered. Both `sf` and `tf` must be between 0 and 1. 
+For additional details, refer to [NIST Engineering Statistics Handbook](https://www.itl.nist.gov/div898/handbook/pmc/section4/pmc433.htm). +In Prometheus V2 this function was called `holt_winters`. This caused confusion +since the Holt-Winters method usually refers to triple exponential smoothing. +Double exponential smoothing as implemented here is also referred to as "Holt +Linear". -`holt_winters` should only be used with gauges. +`double_exponential_smoothing` should only be used with gauges. ## `hour()` diff --git a/promql/bench_test.go b/promql/bench_test.go index 74e85b0548..cd6d1190ca 100644 --- a/promql/bench_test.go +++ b/promql/bench_test.go @@ -117,7 +117,7 @@ func rangeQueryCases() []benchCase { }, // Holt-Winters and long ranges. { - expr: "holt_winters(a_X[1d], 0.3, 0.3)", + expr: "double_exponential_smoothing(a_X[1d], 0.3, 0.3)", }, { expr: "changes(a_X[1d])", diff --git a/promql/functions.go b/promql/functions.go index 5bdcce65df..c4a7ee4a46 100644 --- a/promql/functions.go +++ b/promql/functions.go @@ -350,7 +350,7 @@ func calcTrendValue(i int, tf, s0, s1, b float64) float64 { // data. A lower smoothing factor increases the influence of historical data. The trend factor (0 < tf < 1) affects // how trends in historical data will affect the current data. A higher trend factor increases the influence. // of trends. Algorithm taken from https://en.wikipedia.org/wiki/Exponential_smoothing titled: "Double exponential smoothing". -func funcHoltWinters(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcDoubleExponentialSmoothing(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { samples := vals[0].(Matrix)[0] // The smoothing factor argument. @@ -1657,82 +1657,82 @@ func funcYear(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) // FunctionCalls is a list of all functions supported by PromQL, including their types. 
var FunctionCalls = map[string]FunctionCall{ - "abs": funcAbs, - "absent": funcAbsent, - "absent_over_time": funcAbsentOverTime, - "acos": funcAcos, - "acosh": funcAcosh, - "asin": funcAsin, - "asinh": funcAsinh, - "atan": funcAtan, - "atanh": funcAtanh, - "avg_over_time": funcAvgOverTime, - "ceil": funcCeil, - "changes": funcChanges, - "clamp": funcClamp, - "clamp_max": funcClampMax, - "clamp_min": funcClampMin, - "cos": funcCos, - "cosh": funcCosh, - "count_over_time": funcCountOverTime, - "days_in_month": funcDaysInMonth, - "day_of_month": funcDayOfMonth, - "day_of_week": funcDayOfWeek, - "day_of_year": funcDayOfYear, - "deg": funcDeg, - "delta": funcDelta, - "deriv": funcDeriv, - "exp": funcExp, - "floor": funcFloor, - "histogram_avg": funcHistogramAvg, - "histogram_count": funcHistogramCount, - "histogram_fraction": funcHistogramFraction, - "histogram_quantile": funcHistogramQuantile, - "histogram_sum": funcHistogramSum, - "histogram_stddev": funcHistogramStdDev, - "histogram_stdvar": funcHistogramStdVar, - "holt_winters": funcHoltWinters, - "hour": funcHour, - "idelta": funcIdelta, - "increase": funcIncrease, - "irate": funcIrate, - "label_replace": funcLabelReplace, - "label_join": funcLabelJoin, - "ln": funcLn, - "log10": funcLog10, - "log2": funcLog2, - "last_over_time": funcLastOverTime, - "mad_over_time": funcMadOverTime, - "max_over_time": funcMaxOverTime, - "min_over_time": funcMinOverTime, - "minute": funcMinute, - "month": funcMonth, - "pi": funcPi, - "predict_linear": funcPredictLinear, - "present_over_time": funcPresentOverTime, - "quantile_over_time": funcQuantileOverTime, - "rad": funcRad, - "rate": funcRate, - "resets": funcResets, - "round": funcRound, - "scalar": funcScalar, - "sgn": funcSgn, - "sin": funcSin, - "sinh": funcSinh, - "sort": funcSort, - "sort_desc": funcSortDesc, - "sort_by_label": funcSortByLabel, - "sort_by_label_desc": funcSortByLabelDesc, - "sqrt": funcSqrt, - "stddev_over_time": funcStddevOverTime, - "stdvar_over_time": 
funcStdvarOverTime, - "sum_over_time": funcSumOverTime, - "tan": funcTan, - "tanh": funcTanh, - "time": funcTime, - "timestamp": funcTimestamp, - "vector": funcVector, - "year": funcYear, + "abs": funcAbs, + "absent": funcAbsent, + "absent_over_time": funcAbsentOverTime, + "acos": funcAcos, + "acosh": funcAcosh, + "asin": funcAsin, + "asinh": funcAsinh, + "atan": funcAtan, + "atanh": funcAtanh, + "avg_over_time": funcAvgOverTime, + "ceil": funcCeil, + "changes": funcChanges, + "clamp": funcClamp, + "clamp_max": funcClampMax, + "clamp_min": funcClampMin, + "cos": funcCos, + "cosh": funcCosh, + "count_over_time": funcCountOverTime, + "days_in_month": funcDaysInMonth, + "day_of_month": funcDayOfMonth, + "day_of_week": funcDayOfWeek, + "day_of_year": funcDayOfYear, + "deg": funcDeg, + "delta": funcDelta, + "deriv": funcDeriv, + "exp": funcExp, + "floor": funcFloor, + "histogram_avg": funcHistogramAvg, + "histogram_count": funcHistogramCount, + "histogram_fraction": funcHistogramFraction, + "histogram_quantile": funcHistogramQuantile, + "histogram_sum": funcHistogramSum, + "histogram_stddev": funcHistogramStdDev, + "histogram_stdvar": funcHistogramStdVar, + "double_exponential_smoothing": funcDoubleExponentialSmoothing, + "hour": funcHour, + "idelta": funcIdelta, + "increase": funcIncrease, + "irate": funcIrate, + "label_replace": funcLabelReplace, + "label_join": funcLabelJoin, + "ln": funcLn, + "log10": funcLog10, + "log2": funcLog2, + "last_over_time": funcLastOverTime, + "mad_over_time": funcMadOverTime, + "max_over_time": funcMaxOverTime, + "min_over_time": funcMinOverTime, + "minute": funcMinute, + "month": funcMonth, + "pi": funcPi, + "predict_linear": funcPredictLinear, + "present_over_time": funcPresentOverTime, + "quantile_over_time": funcQuantileOverTime, + "rad": funcRad, + "rate": funcRate, + "resets": funcResets, + "round": funcRound, + "scalar": funcScalar, + "sgn": funcSgn, + "sin": funcSin, + "sinh": funcSinh, + "sort": funcSort, + "sort_desc": 
funcSortDesc, + "sort_by_label": funcSortByLabel, + "sort_by_label_desc": funcSortByLabelDesc, + "sqrt": funcSqrt, + "stddev_over_time": funcStddevOverTime, + "stdvar_over_time": funcStdvarOverTime, + "sum_over_time": funcSumOverTime, + "tan": funcTan, + "tanh": funcTanh, + "time": funcTime, + "timestamp": funcTimestamp, + "vector": funcVector, + "year": funcYear, } // AtModifierUnsafeFunctions are the functions whose result diff --git a/promql/parser/functions.go b/promql/parser/functions.go index 4fe3c80935..9d7b560537 100644 --- a/promql/parser/functions.go +++ b/promql/parser/functions.go @@ -202,8 +202,8 @@ var Functions = map[string]*Function{ ArgTypes: []ValueType{ValueTypeScalar, ValueTypeVector}, ReturnType: ValueTypeVector, }, - "holt_winters": { - Name: "holt_winters", + "double_exponential_smoothing": { + Name: "double_exponential_smoothing", ArgTypes: []ValueType{ValueTypeMatrix, ValueTypeScalar, ValueTypeScalar}, ReturnType: ValueTypeVector, Experimental: true, diff --git a/promql/promqltest/testdata/functions.test b/promql/promqltest/testdata/functions.test index 4b025448a5..c9af6c4c90 100644 --- a/promql/promqltest/testdata/functions.test +++ b/promql/promqltest/testdata/functions.test @@ -651,7 +651,7 @@ eval_ordered instant at 50m sort_by_label(node_uname_info, "release") node_uname_info{job="node_exporter", instance="4m5", release="1.11.3"} 100 node_uname_info{job="node_exporter", instance="4m1000", release="1.111.3"} 100 -# Tests for holt_winters +# Tests for double_exponential_smoothing clear # positive trends @@ -661,7 +661,7 @@ load 10s http_requests{job="api-server", instance="0", group="canary"} 0+30x1000 300+80x1000 http_requests{job="api-server", instance="1", group="canary"} 0+40x2000 -eval instant at 8000s holt_winters(http_requests[1m], 0.01, 0.1) +eval instant at 8000s double_exponential_smoothing(http_requests[1m], 0.01, 0.1) {job="api-server", instance="0", group="production"} 8000 {job="api-server", instance="1", 
group="production"} 16000 {job="api-server", instance="0", group="canary"} 24000 @@ -675,7 +675,7 @@ load 10s http_requests{job="api-server", instance="0", group="canary"} 0+30x1000 300-80x1000 http_requests{job="api-server", instance="1", group="canary"} 0-40x1000 0+40x1000 -eval instant at 8000s holt_winters(http_requests[1m], 0.01, 0.1) +eval instant at 8000s double_exponential_smoothing(http_requests[1m], 0.01, 0.1) {job="api-server", instance="0", group="production"} 0 {job="api-server", instance="1", group="production"} -16000 {job="api-server", instance="0", group="canary"} 24000 diff --git a/ui-commits b/ui-commits new file mode 100644 index 0000000000..7f34e1f95a --- /dev/null +++ b/ui-commits @@ -0,0 +1,12 @@ +dfec29d8e Fix border color for target pools with one target that is failing +65743bf9b ui: drop template readme +a7c1a951d Add general Mantine overrides CSS file +0757fbbec Make sure that alert element table headers are not wrapped +0180cf31a Factor out common icon and card styles +50af7d589 Fix tree line drawing by using a callback ref +ac01dc903 Explain, vector-to-vector: Do not compute results for set operators +9b0dc68d0 PromQL explain view: Support set operators +57898c792 Refactor and fix time formatting functions, add tests +091fc403c Fiddle with targets table styles to try and improve things a bit +a1908df92 Don't wrap action buttons below metric name in metrics explorer +ac5377873 mantine UI: Distinguish between Not Ready and Stopping diff --git a/web/ui/mantine-ui/src/promql/functionDocs.tsx b/web/ui/mantine-ui/src/promql/functionDocs.tsx index 36c081e866..45fcd03b7f 100644 --- a/web/ui/mantine-ui/src/promql/functionDocs.tsx +++ b/web/ui/mantine-ui/src/promql/functionDocs.tsx @@ -1277,17 +1277,17 @@ const funcDocs: Record = {

), - holt_winters: ( + double_exponential_smoothing: ( <>

- holt_winters(v range-vector, sf scalar, tf scalar) produces a smoothed value for time series based on + double_exponential_smoothing(v range-vector, sf scalar, tf scalar) produces a smoothed value for time series based on the range in v. The lower the smoothing factor sf, the more importance is given to old data. The higher the trend factor tf, the more trends in the data is considered. Both sf{' '} and tf must be between 0 and 1.

- holt_winters should only be used with gauges. + double_exponential_smoothing should only be used with gauges.

), diff --git a/web/ui/mantine-ui/src/promql/functionMeta.ts b/web/ui/mantine-ui/src/promql/functionMeta.ts index 631c86e2db..1dec466627 100644 --- a/web/ui/mantine-ui/src/promql/functionMeta.ts +++ b/web/ui/mantine-ui/src/promql/functionMeta.ts @@ -17,7 +17,7 @@ export const functionArgNames: Record = { // exp: [], // floor: [], histogram_quantile: ['target quantile', 'histogram'], - holt_winters: ['input series', 'smoothing factor', 'trend factor'], + double_exponential_smoothing: ['input series', 'smoothing factor', 'trend factor'], hour: ['timestamp (default = vector(time()))'], // idelta: [], // increase: [], @@ -68,7 +68,7 @@ export const functionDescriptions: Record = { exp: 'calculate exponential function for input vector values', floor: 'round down values of input series to nearest integer', histogram_quantile: 'calculate quantiles from histogram buckets', - holt_winters: 'calculate smoothed value of input series', + double_exponential_smoothing: 'calculate smoothed value of input series', hour: 'return the hour of the day for provided timestamps', idelta: 'calculate the difference between the last two samples of a range vector (for counters)', increase: 'calculate the increase in value over a range of time (for counters)', diff --git a/web/ui/mantine-ui/src/promql/functionSignatures.ts b/web/ui/mantine-ui/src/promql/functionSignatures.ts index 9f5b617420..472d54ac5a 100644 --- a/web/ui/mantine-ui/src/promql/functionSignatures.ts +++ b/web/ui/mantine-ui/src/promql/functionSignatures.ts @@ -60,8 +60,8 @@ export const functionSignatures: Record = { histogram_stddev: { name: 'histogram_stddev', argTypes: [valueType.vector], variadic: 0, returnType: valueType.vector }, histogram_stdvar: { name: 'histogram_stdvar', argTypes: [valueType.vector], variadic: 0, returnType: valueType.vector }, histogram_sum: { name: 'histogram_sum', argTypes: [valueType.vector], variadic: 0, returnType: valueType.vector }, - holt_winters: { - name: 'holt_winters', + 
double_exponential_smoothing: { + name: 'double_exponential_smoothing', argTypes: [valueType.matrix, valueType.scalar, valueType.scalar], variadic: 0, returnType: valueType.vector, diff --git a/web/ui/module/codemirror-promql/src/complete/promql.terms.ts b/web/ui/module/codemirror-promql/src/complete/promql.terms.ts index f4f934f500..20cf14d4b6 100644 --- a/web/ui/module/codemirror-promql/src/complete/promql.terms.ts +++ b/web/ui/module/codemirror-promql/src/complete/promql.terms.ts @@ -258,7 +258,7 @@ export const functionIdentifierTerms = [ type: 'function', }, { - label: 'holt_winters', + label: 'double_exponential_smoothing', detail: 'function', info: 'Calculate smoothed value of input series', type: 'function', diff --git a/web/ui/module/codemirror-promql/src/types/function.ts b/web/ui/module/codemirror-promql/src/types/function.ts index 2505edc227..4048e2db03 100644 --- a/web/ui/module/codemirror-promql/src/types/function.ts +++ b/web/ui/module/codemirror-promql/src/types/function.ts @@ -46,7 +46,7 @@ import { HistogramStdDev, HistogramStdVar, HistogramSum, - HoltWinters, + DoubleExponentialSmoothing, Hour, Idelta, Increase, @@ -312,8 +312,8 @@ const promqlFunctions: { [key: number]: PromQLFunction } = { variadic: 0, returnType: ValueType.vector, }, - [HoltWinters]: { - name: 'holt_winters', + [DoubleExponentialSmoothing]: { + name: 'double_exponential_smoothing', argTypes: [ValueType.matrix, ValueType.scalar, ValueType.scalar], variadic: 0, returnType: ValueType.vector, diff --git a/web/ui/module/lezer-promql/src/highlight.js b/web/ui/module/lezer-promql/src/highlight.js index 92f27c8470..364c4e39ab 100644 --- a/web/ui/module/lezer-promql/src/highlight.js +++ b/web/ui/module/lezer-promql/src/highlight.js @@ -20,7 +20,7 @@ export const promQLHighLight = styleTags({ NumberDurationLiteral: tags.number, NumberDurationLiteralInDurationContext: tags.number, Identifier: tags.variableName, - 'Abs Absent AbsentOverTime Acos Acosh Asin Asinh Atan Atanh AvgOverTime 
Ceil Changes Clamp ClampMax ClampMin Cos Cosh CountOverTime DaysInMonth DayOfMonth DayOfWeek DayOfYear Deg Delta Deriv Exp Floor HistogramAvg HistogramCount HistogramFraction HistogramQuantile HistogramSum HoltWinters Hour Idelta Increase Irate LabelReplace LabelJoin LastOverTime Ln Log10 Log2 MaxOverTime MinOverTime Minute Month Pi PredictLinear PresentOverTime QuantileOverTime Rad Rate Resets Round Scalar Sgn Sin Sinh Sort SortDesc SortByLabel SortByLabelDesc Sqrt StddevOverTime StdvarOverTime SumOverTime Tan Tanh Time Timestamp Vector Year': + 'Abs Absent AbsentOverTime Acos Acosh Asin Asinh Atan Atanh AvgOverTime Ceil Changes Clamp ClampMax ClampMin Cos Cosh CountOverTime DaysInMonth DayOfMonth DayOfWeek DayOfYear Deg Delta Deriv Exp Floor HistogramAvg HistogramCount HistogramFraction HistogramQuantile HistogramSum DoubleExponentialSmoothing Hour Idelta Increase Irate LabelReplace LabelJoin LastOverTime Ln Log10 Log2 MaxOverTime MinOverTime Minute Month Pi PredictLinear PresentOverTime QuantileOverTime Rad Rate Resets Round Scalar Sgn Sin Sinh Sort SortDesc SortByLabel SortByLabelDesc Sqrt StddevOverTime StdvarOverTime SumOverTime Tan Tanh Time Timestamp Vector Year': tags.function(tags.variableName), 'Avg Bottomk Count Count_values Group LimitK LimitRatio Max Min Quantile Stddev Stdvar Sum Topk': tags.operatorKeyword, 'By Without Bool On Ignoring GroupLeft GroupRight Offset Start End': tags.modifier, diff --git a/web/ui/module/lezer-promql/src/promql.grammar b/web/ui/module/lezer-promql/src/promql.grammar index 95c09d25ab..977cce44db 100644 --- a/web/ui/module/lezer-promql/src/promql.grammar +++ b/web/ui/module/lezer-promql/src/promql.grammar @@ -141,7 +141,7 @@ FunctionIdentifier { HistogramStdVar | HistogramSum | HistogramAvg | - HoltWinters | + DoubleExponentialSmoothing | Hour | Idelta | Increase | @@ -388,7 +388,7 @@ NumberDurationLiteralInDurationContext { HistogramStdDev { condFn<"histogram_stddev"> } HistogramStdVar { condFn<"histogram_stdvar"> } 
HistogramSum { condFn<"histogram_sum"> } - HoltWinters { condFn<"holt_winters"> } + DoubleExponentialSmoothing { condFn<"double_exponential_smoothing"> } Hour { condFn<"hour"> } Idelta { condFn<"idelta"> } Increase { condFn<"increase"> } From aa6dd70812c5b2d27e9ff834d22cadb388f3fb87 Mon Sep 17 00:00:00 2001 From: Jan Fajerski Date: Wed, 18 Sep 2024 11:36:42 +0200 Subject: [PATCH 029/137] changelog: record holt_winters rename Signed-off-by: Jan Fajerski --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index a60dea1698..e515bd370a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,7 @@ ## unreleased +* [CHANGE] `holt_winters` is now called `double_exponential_smoothing` and moves behind the [experimental-promql-functions feature flag](https://prometheus.io/docs/prometheus/latest/feature_flags/#experimental-promql-functions). #14930 * [BUGFIX] PromQL: Only return "possible non-counter" annotation when `rate` returns points. #14910 ## 3.0.0-beta.0 / 2024-09-05 From 6e899fbb1601ea4a7a4c937c12442a4f3a274334 Mon Sep 17 00:00:00 2001 From: Augustin Husson Date: Thu, 19 Sep 2024 16:35:14 +0200 Subject: [PATCH 030/137] fix autocompletion when using by/without Signed-off-by: Augustin Husson --- .../src/complete/hybrid.test.ts | 12 +++++++++++ .../codemirror-promql/src/complete/hybrid.ts | 21 +++++++------------ 2 files changed, 19 insertions(+), 14 deletions(-) diff --git a/web/ui/module/codemirror-promql/src/complete/hybrid.test.ts b/web/ui/module/codemirror-promql/src/complete/hybrid.test.ts index 4728f18228..587b31e743 100644 --- a/web/ui/module/codemirror-promql/src/complete/hybrid.test.ts +++ b/web/ui/module/codemirror-promql/src/complete/hybrid.test.ts @@ -601,6 +601,18 @@ describe('analyzeCompletion test', () => { pos: 10, expectedContext: [{ kind: ContextKind.MetricName, metricName: 'r' }, { kind: ContextKind.Function }, { kind: ContextKind.Aggregation }], }, + { + title: 'autocomplete topk params 4', + expr: 'topk 
by(instance) ()', + pos: 19, + expectedContext: [{ kind: ContextKind.Number }], + }, + { + title: 'autocomplete topk params 5', + expr: 'topk by(instance) (inf,r)', + pos: 24, + expectedContext: [{ kind: ContextKind.MetricName, metricName: 'r' }, { kind: ContextKind.Function }, { kind: ContextKind.Aggregation }], + }, ]; testCases.forEach((value) => { it(value.title, () => { diff --git a/web/ui/module/codemirror-promql/src/complete/hybrid.ts b/web/ui/module/codemirror-promql/src/complete/hybrid.ts index 6018b58743..24b23ec6b7 100644 --- a/web/ui/module/codemirror-promql/src/complete/hybrid.ts +++ b/web/ui/module/codemirror-promql/src/complete/hybrid.ts @@ -204,7 +204,7 @@ export function computeStartCompletePosition(state: EditorState, node: SyntaxNod // So we have to analyze the string about the current node to see if the duration unit is already present or not. (node.type.id === NumberDurationLiteralInDurationContext && !durationTerms.map((v) => v.label).includes(currentText[currentText.length - 1])) || (node.type.id === NumberDurationLiteral && node.parent?.type.id === 0 && node.parent.parent?.type.id === SubqueryExpr) || - (node.type.id === FunctionCallBody && isItATopKLikeAggregationFunc(node) && node.firstChild !== null) || + (node.type.id === FunctionCallBody && isAggregatorWithParam(node) && node.firstChild !== null) || (node.type.id === 0 && (node.parent?.type.id === OffsetExpr || node.parent?.type.id === MatrixSelector || @@ -215,18 +215,11 @@ export function computeStartCompletePosition(state: EditorState, node: SyntaxNod return start; } -function isItATopKLikeAggregationFunc(functionCallBody: SyntaxNode): boolean { - const prevSibling = functionCallBody.prevSibling; - if (prevSibling !== null && prevSibling.type.id === AggregateOp) { - const aggregationOpType = prevSibling.firstChild; - if ( - aggregationOpType !== null && - (aggregationOpType.type.id == Topk || - aggregationOpType.type.id === Bottomk || - aggregationOpType.type.id === LimitK || - 
aggregationOpType.type.id === LimitRatio || - aggregationOpType.type.id === CountValues) - ) { +function isAggregatorWithParam(functionCallBody: SyntaxNode): boolean { + const parent = functionCallBody.parent; + if (parent !== null && parent.firstChild?.type.id === AggregateOp) { + const aggregationOpType = parent.firstChild.firstChild; + if (aggregationOpType !== null && [Topk, Bottomk, LimitK, LimitRatio, CountValues].includes(aggregationOpType.type.id)) { return true; } } @@ -491,7 +484,7 @@ export function analyzeCompletion(state: EditorState, node: SyntaxNode, pos: num // Unfortunately, as a current node, codemirror doesn't give us the error node but instead the FunctionCallBody // The tree looks like that: PromQL(AggregateExpr(AggregateOp(Topk),FunctionCallBody(NumberDurationLiteral,⚠))) // So, we need to figure out if the cursor is on the first parameter or in the second. - if (isItATopKLikeAggregationFunc(node)) { + if (isAggregatorWithParam(node)) { if (node.firstChild === null || (node.firstChild.from <= pos && node.firstChild.to >= pos)) { // it means the FunctionCallBody has no child, which means we are autocompleting the first parameter result.push({ kind: ContextKind.Number }); From 005bd33fe229157a6da39545e16054160e084b05 Mon Sep 17 00:00:00 2001 From: Callum Styan Date: Thu, 19 Sep 2024 15:38:26 -0700 Subject: [PATCH 031/137] support v2 proto for BenchmarkSampleSend (#14935) Signed-off-by: Callum Styan --- storage/remote/queue_manager_test.go | 28 ++++++++++++++++------------ 1 file changed, 16 insertions(+), 12 deletions(-) diff --git a/storage/remote/queue_manager_test.go b/storage/remote/queue_manager_test.go index 6c61a477f6..99fd023066 100644 --- a/storage/remote/queue_manager_test.go +++ b/storage/remote/queue_manager_test.go @@ -1326,21 +1326,25 @@ func BenchmarkSampleSend(b *testing.B) { cfg.MaxShards = 20 // todo: test with new proto type(s) - m := newTestQueueManager(b, cfg, mcfg, defaultFlushDeadline, c, config.RemoteWriteProtoMsgV1) - 
m.StoreSeries(series, 0) + for _, format := range []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1, config.RemoteWriteProtoMsgV2} { + b.Run(string(format), func(b *testing.B) { + m := newTestQueueManager(b, cfg, mcfg, defaultFlushDeadline, c, format) + m.StoreSeries(series, 0) - // These should be received by the client. - m.Start() - defer m.Stop() + // These should be received by the client. + m.Start() + defer m.Stop() - b.ResetTimer() - for i := 0; i < b.N; i++ { - m.Append(samples) - m.UpdateSeriesSegment(series, i+1) // simulate what wlog.Watcher.garbageCollectSeries does - m.SeriesReset(i + 1) + b.ResetTimer() + for i := 0; i < b.N; i++ { + m.Append(samples) + m.UpdateSeriesSegment(series, i+1) // simulate what wlog.Watcher.garbageCollectSeries does + m.SeriesReset(i + 1) + } + // Do not include shutdown + b.StopTimer() + }) } - // Do not include shutdown - b.StopTimer() } // Check how long it takes to add N series, including external labels processing. From 6dbb4e1a94f75a057720bf0dd101897283465c8e Mon Sep 17 00:00:00 2001 From: Charles Korn Date: Fri, 20 Sep 2024 11:49:54 +1000 Subject: [PATCH 032/137] Fix linting issues Signed-off-by: Charles Korn --- model/histogram/float_histogram.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/model/histogram/float_histogram.go b/model/histogram/float_histogram.go index 1777afdbf1..300f3176e4 100644 --- a/model/histogram/float_histogram.go +++ b/model/histogram/float_histogram.go @@ -234,11 +234,11 @@ func (h *FloatHistogram) TestExpression() string { case UnknownCounterReset: // Unknown is the default, don't add anything. 
case CounterReset: - res = append(res, fmt.Sprintf("counter_reset_hint:reset")) + res = append(res, "counter_reset_hint:reset") case NotCounterReset: - res = append(res, fmt.Sprintf("counter_reset_hint:not_reset")) + res = append(res, "counter_reset_hint:not_reset") case GaugeType: - res = append(res, fmt.Sprintf("counter_reset_hint:gauge")) + res = append(res, "counter_reset_hint:gauge") } addBuckets := func(kind, bucketsKey, offsetKey string, buckets []float64, spans []Span) []string { From 5096bb9f29b85a152cf09090eb054ad3014c6810 Mon Sep 17 00:00:00 2001 From: Julien Date: Fri, 20 Sep 2024 11:33:55 +0200 Subject: [PATCH 033/137] Mantine UI: removed unuse file Signed-off-by: Julien --- .../src/pages/targets/TargetsPage.tsx.copy | 355 ------------------ 1 file changed, 355 deletions(-) delete mode 100644 web/ui/mantine-ui/src/pages/targets/TargetsPage.tsx.copy diff --git a/web/ui/mantine-ui/src/pages/targets/TargetsPage.tsx.copy b/web/ui/mantine-ui/src/pages/targets/TargetsPage.tsx.copy deleted file mode 100644 index 3b7bf2d656..0000000000 --- a/web/ui/mantine-ui/src/pages/targets/TargetsPage.tsx.copy +++ /dev/null @@ -1,355 +0,0 @@ -import { - Accordion, - ActionIcon, - Alert, - Badge, - Group, - Input, - RingProgress, - Select, - Stack, - Table, - Text, -} from "@mantine/core"; -import { - IconAlertTriangle, - IconInfoCircle, - IconLayoutNavbarCollapse, - IconLayoutNavbarExpand, - IconSearch, -} from "@tabler/icons-react"; -import { StateMultiSelect } from "../../components/StateMultiSelect"; -import { useSuspenseAPIQuery } from "../../api/api"; -import { ScrapePoolsResult } from "../../api/responseTypes/scrapePools"; -import { Target, TargetsResult } from "../../api/responseTypes/targets"; -import React, { useEffect } from "react"; -import badgeClasses from "../../Badge.module.css"; -import { - humanizeDurationRelative, - humanizeDuration, - now, -} from "../../lib/formatTime"; -import { LabelBadges } from "../../components/LabelBadges"; -import { 
useAppDispatch, useAppSelector } from "../../state/hooks"; -import { - setCollapsedPools, - updateTargetFilters, -} from "../../state/targetsPageSlice"; -import EndpointLink from "../../components/EndpointLink"; -import CustomInfiniteScroll from "../../components/CustomInfiniteScroll"; -import { filter } from "lodash"; - -type ScrapePool = { - targets: Target[]; - upCount: number; - downCount: number; - unknownCount: number; -}; - -type ScrapePools = { - [scrapePool: string]: ScrapePool; -}; - -const healthBadgeClass = (state: string) => { - switch (state.toLowerCase()) { - case "up": - return badgeClasses.healthOk; - case "down": - return badgeClasses.healthErr; - case "unknown": - return badgeClasses.healthUnknown; - default: - return badgeClasses.warn; - } -}; - -const groupTargets = (targets: Target[]): ScrapePools => { - const pools: ScrapePools = {}; - targets.forEach((target) => { - if (!pools[target.scrapePool]) { - pools[target.scrapePool] = { - targets: [], - upCount: 0, - downCount: 0, - unknownCount: 0, - }; - } - pools[target.scrapePool].targets.push(target); - switch (target.health.toLowerCase()) { - case "up": - pools[target.scrapePool].upCount++; - break; - case "down": - pools[target.scrapePool].downCount++; - break; - case "unknown": - pools[target.scrapePool].unknownCount++; - break; - } - }); - return pools; -}; - -const scrapePoolQueryParam = "scrapePool"; - -export default function TargetsPage() { - // Load the list of all available scrape pools. - const { - data: { - data: { scrapePools }, - }, - } = useSuspenseAPIQuery({ - path: `/scrape_pools`, - }); - - const dispatch = useAppDispatch(); - - // If there is a selected pool in the URL, extract it on initial load. 
- useEffect(() => { - const selectedPool = new URLSearchParams(window.location.search).get( - scrapePoolQueryParam - ); - if (selectedPool !== null) { - dispatch(updateTargetFilters({ scrapePool: selectedPool })); - } - }, [dispatch]); - - const filters = useAppSelector((state) => state.targetsPage.filters); - - let poolToShow = filters.scrapePool; - let limitedDueToManyPools = false; - - if (poolToShow === null && scrapePools.length > 20) { - poolToShow = scrapePools[0]; - limitedDueToManyPools = true; - } - - // Based on the selected pool (if any), load the list of targets. - const { - data: { - data: { activeTargets }, - }, - } = useSuspenseAPIQuery({ - path: `/targets`, - params: { - state: "active", - scrapePool: poolToShow === null ? "" : poolToShow, - }, - }); - - const collapsedPools = useAppSelector( - (state) => state.targetsPage.collapsedPools - ); - - const allPools = groupTargets(activeTargets); - const allPoolNames = Object.keys(allPools); - - return ( - <> - - } - placeholder="Filter by endpoint or labels" - > - - dispatch( - setCollapsedPools(collapsedPools.length > 0 ? [] : allPoolNames) - ) - } - > - {collapsedPools.length > 0 ? ( - - ) : ( - - )} - - - - {allPoolNames.length === 0 && ( - } - > - No targets found that match your filter criteria. - - )} - {limitedDueToManyPools && ( - } - > - There are many scrape pools configured. Showing only the first one. - Use the dropdown to select a different pool. 
- - )} - !collapsedPools.includes(p))} - onChange={(value) => - dispatch( - setCollapsedPools(allPoolNames.filter((p) => !value.includes(p))) - ) - } - > - {allPoolNames.map((poolName) => { - const pool = allPools[poolName]; - return ( - - - - {poolName} - - - {pool.upCount} / {pool.targets.length} up - - - - - - - - filters.health.length === 0 || - filters.health.includes(t.health.toLowerCase()) - )} - child={({ items }) => ( - - - - Endpoint - State - Labels - Last scrape - Scrape duration - - - - {items.map((target, i) => ( - // TODO: Find a stable and definitely unique key. - - - - {/* TODO: Process target URL like in old UI */} - - - - - {target.health} - - - - - - - {humanizeDurationRelative( - target.lastScrape, - now() - )} - - - {humanizeDuration( - target.lastScrapeDuration * 1000 - )} - - - {target.lastError && ( - - - } - > - Error scraping target:{" "} - {target.lastError} - - - - )} - - ))} - -
- )} - /> -
-
- ); - })} -
-
- - ); -} From 7ebda924b82ad6da515ecc9534573f652f2ef40e Mon Sep 17 00:00:00 2001 From: Julien Date: Fri, 20 Sep 2024 11:59:12 +0200 Subject: [PATCH 034/137] fix(web): properly format sub-millisecond durations in target status page Previously, scrapes durations that are very short (e.g., connection refused) could show as empty (durations under 1 millisecond). This commit ensures that sub-millisecond durations are correctly displayed as "0ms" or "1ms" when necessary. - Adjusted `humanizeDuration` to round sub-millisecond durations to the nearest millisecond. - Updated unit tests to verify the correct handling of sub-millisecond values. Signed-off-by: Julien --- web/ui/mantine-ui/src/lib/formatTime.test.ts | 6 ++++++ web/ui/mantine-ui/src/lib/formatTime.ts | 3 +++ 2 files changed, 9 insertions(+) diff --git a/web/ui/mantine-ui/src/lib/formatTime.test.ts b/web/ui/mantine-ui/src/lib/formatTime.test.ts index c8b743cc0f..597d6909b9 100644 --- a/web/ui/mantine-ui/src/lib/formatTime.test.ts +++ b/web/ui/mantine-ui/src/lib/formatTime.test.ts @@ -57,6 +57,12 @@ describe("humanizeDuration", () => { expect(humanizeDuration(0)).toBe("0s"); }); + test("formats submilliseconds correctly", () => { + expect(humanizeDuration(0.1)).toBe("0ms"); + expect(humanizeDuration(0.6)).toBe("1ms"); + expect(humanizeDuration(0.000001)).toBe("0ms"); + }); + test("formats milliseconds correctly", () => { expect(humanizeDuration(1)).toBe("1ms"); expect(humanizeDuration(999)).toBe("999ms"); diff --git a/web/ui/mantine-ui/src/lib/formatTime.ts b/web/ui/mantine-ui/src/lib/formatTime.ts index 3b6722209a..95beb21d6b 100644 --- a/web/ui/mantine-ui/src/lib/formatTime.ts +++ b/web/ui/mantine-ui/src/lib/formatTime.ts @@ -86,6 +86,9 @@ const formatDuration = ( r.push(`${v}${unit}`); } } + if (r.length == 0 && unit == "ms") { + r.push(`${Math.round(ms)}ms`) + } } return sign + r.join(componentSeparator || ""); From 146b22d196f9396abfb12900fbe4a16966b63cf5 Mon Sep 17 00:00:00 2001 From: Julien Date: Fri, 20 
Sep 2024 13:38:05 +0200 Subject: [PATCH 035/137] fix(autoreload): Reload invalid yaml files When a YAML file is invalid, trigger auto-reload anyway so that user is aware that the configuration file is incorrect. Failing to do so does not change the reload status in metrics and api. Signed-off-by: Julien --- cmd/prometheus/main.go | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/cmd/prometheus/main.go b/cmd/prometheus/main.go index 24e666cb6c..e7fd82e6f3 100644 --- a/cmd/prometheus/main.go +++ b/cmd/prometheus/main.go @@ -1119,9 +1119,7 @@ func main() { currentChecksum, err := config.GenerateChecksum(cfg.configFile) if err != nil { level.Error(logger).Log("msg", "Failed to generate checksum during configuration reload", "err", err) - continue - } - if currentChecksum == checksum { + } else if currentChecksum == checksum { continue } level.Info(logger).Log("msg", "Configuration file change detected, reloading the configuration.") From 315165e49d0c19cf2e7888803d0b1f77b0181f49 Mon Sep 17 00:00:00 2001 From: Manik Rana Date: Fri, 20 Sep 2024 21:56:19 +0530 Subject: [PATCH 036/137] refac: make typeRequiresCT private Signed-off-by: Manik Rana --- model/textparse/openmetricsparse.go | 8 ++++---- model/textparse/promparse_test.go | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/model/textparse/openmetricsparse.go b/model/textparse/openmetricsparse.go index 5f0415d3ee..ea7607c3a7 100644 --- a/model/textparse/openmetricsparse.go +++ b/model/textparse/openmetricsparse.go @@ -252,7 +252,7 @@ func (p *OpenMetricsParser) Exemplar(e *exemplar.Exemplar) bool { // CreatedTimestamp returns the created timestamp for a current Metric if exists or nil. // NOTE(Maniktherana): Might use additional CPU/mem resources due to deep copy of parser required for peeking given 1.0 OM specification on _created series. 
func (p *OpenMetricsParser) CreatedTimestamp() *int64 { - if !TypeRequiresCT(p.mtype) { + if !typeRequiresCT(p.mtype) { // Not a CT supported metric type, fast path. return nil } @@ -302,8 +302,8 @@ func (p *OpenMetricsParser) CreatedTimestamp() *int64 { } } -// TypeRequiresCT returns true if the metric type requires a _created timestamp. -func TypeRequiresCT(t model.MetricType) bool { +// typeRequiresCT returns true if the metric type requires a _created timestamp. +func typeRequiresCT(t model.MetricType) bool { switch t { case model.MetricTypeCounter, model.MetricTypeSummary, model.MetricTypeHistogram: return true @@ -594,7 +594,7 @@ func (p *OpenMetricsParser) isCreatedSeries() bool { var newLbs labels.Labels p.Metric(&newLbs) name := newLbs.Get(model.MetricNameLabel) - if TypeRequiresCT(p.mtype) && strings.HasSuffix(name, "_created") { + if typeRequiresCT(p.mtype) && strings.HasSuffix(name, "_created") { return true } return false diff --git a/model/textparse/promparse_test.go b/model/textparse/promparse_test.go index 992140ce0a..ce9daf53e0 100644 --- a/model/textparse/promparse_test.go +++ b/model/textparse/promparse_test.go @@ -214,7 +214,7 @@ func checkParseResultsWithCT(t *testing.T, p Parser, exp []expectedParse, ctLine if ctLinesRemoved { // Are CT series skipped? 
_, typ := p.Type() - if TypeRequiresCT(typ) && strings.HasSuffix(res.Get(labels.MetricName), "_created") { + if typeRequiresCT(typ) && strings.HasSuffix(res.Get(labels.MetricName), "_created") { t.Fatalf("we exped created lines skipped") } } From 3fba6bcdfaca1aa6ce079d6ce6ece1d7b209ac29 Mon Sep 17 00:00:00 2001 From: Manik Rana Date: Fri, 20 Sep 2024 22:00:15 +0530 Subject: [PATCH 037/137] chore: remove unused code Signed-off-by: Manik Rana --- promql/fuzz.go | 4 ---- 1 file changed, 4 deletions(-) diff --git a/promql/fuzz.go b/promql/fuzz.go index 5f08e6a72c..3fd50b9496 100644 --- a/promql/fuzz.go +++ b/promql/fuzz.go @@ -68,10 +68,6 @@ func fuzzParseMetricWithContentType(in []byte, contentType string) int { panic(warning) } - if contentType == "application/openmetrics-text" { - p = textparse.NewOpenMetricsParser(in, symbolTable) - } - var err error for { _, err = p.Next() From 9215252221529d3ed56b5bf019e14acf20e4b618 Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Fri, 20 Sep 2024 17:40:17 +0100 Subject: [PATCH 038/137] [BUGFIX] TSDB: Only query chunks up to truncation time (#14948) If the query overlaps the range currently undergoing compaction, we should only fetch chunks up to that time. Need to store that min time in `HeadAndOOOIndexReader`. 
Signed-off-by: Bryan Boreham --- tsdb/db.go | 8 ++++++-- tsdb/ooo_head_read.go | 23 ++++++++++++----------- tsdb/ooo_head_read_test.go | 8 ++++---- tsdb/querier_test.go | 2 +- 4 files changed, 23 insertions(+), 18 deletions(-) diff --git a/tsdb/db.go b/tsdb/db.go index 64e1158d41..3b1dee27d4 100644 --- a/tsdb/db.go +++ b/tsdb/db.go @@ -2060,6 +2060,7 @@ func (db *DB) Querier(mint, maxt int64) (_ storage.Querier, err error) { overlapsOOO := overlapsClosedInterval(mint, maxt, db.head.MinOOOTime(), db.head.MaxOOOTime()) var headQuerier storage.Querier + inoMint := mint if maxt >= db.head.MinTime() || overlapsOOO { rh := NewRangeHead(db.head, mint, maxt) var err error @@ -2084,13 +2085,14 @@ func (db *DB) Querier(mint, maxt int64) (_ storage.Querier, err error) { if err != nil { return nil, fmt.Errorf("open block querier for head while getting new querier %s: %w", rh, err) } + inoMint = newMint } } if overlapsOOO { // We need to fetch from in-order and out-of-order chunks: wrap the headQuerier. 
isoState := db.head.oooIso.TrackReadAfter(db.lastGarbageCollectedMmapRef) - headQuerier = NewHeadAndOOOQuerier(mint, maxt, db.head, isoState, headQuerier) + headQuerier = NewHeadAndOOOQuerier(inoMint, mint, maxt, db.head, isoState, headQuerier) } if headQuerier != nil { @@ -2136,6 +2138,7 @@ func (db *DB) blockChunkQuerierForRange(mint, maxt int64) (_ []storage.ChunkQuer overlapsOOO := overlapsClosedInterval(mint, maxt, db.head.MinOOOTime(), db.head.MaxOOOTime()) var headQuerier storage.ChunkQuerier + inoMint := mint if maxt >= db.head.MinTime() || overlapsOOO { rh := NewRangeHead(db.head, mint, maxt) headQuerier, err = db.blockChunkQuerierFunc(rh, mint, maxt) @@ -2159,13 +2162,14 @@ func (db *DB) blockChunkQuerierForRange(mint, maxt int64) (_ []storage.ChunkQuer if err != nil { return nil, fmt.Errorf("open querier for head while getting new querier %s: %w", rh, err) } + inoMint = newMint } } if overlapsOOO { // We need to fetch from in-order and out-of-order chunks: wrap the headQuerier. isoState := db.head.oooIso.TrackReadAfter(db.lastGarbageCollectedMmapRef) - headQuerier = NewHeadAndOOOChunkQuerier(mint, maxt, db.head, isoState, headQuerier) + headQuerier = NewHeadAndOOOChunkQuerier(inoMint, mint, maxt, db.head, isoState, headQuerier) } if headQuerier != nil { diff --git a/tsdb/ooo_head_read.go b/tsdb/ooo_head_read.go index 746d38a65b..26cd4d057e 100644 --- a/tsdb/ooo_head_read.go +++ b/tsdb/ooo_head_read.go @@ -35,6 +35,7 @@ var _ IndexReader = &HeadAndOOOIndexReader{} type HeadAndOOOIndexReader struct { *headIndexReader // A reference to the headIndexReader so we can reuse as many interface implementation as possible. 
+ inoMint int64 lastGarbageCollectedMmapRef chunks.ChunkDiskMapperRef } @@ -49,13 +50,13 @@ func (o mergedOOOChunks) Iterator(iterator chunkenc.Iterator) chunkenc.Iterator return storage.ChainSampleIteratorFromIterables(iterator, o.chunkIterables) } -func NewHeadAndOOOIndexReader(head *Head, mint, maxt int64, lastGarbageCollectedMmapRef chunks.ChunkDiskMapperRef) *HeadAndOOOIndexReader { +func NewHeadAndOOOIndexReader(head *Head, inoMint, mint, maxt int64, lastGarbageCollectedMmapRef chunks.ChunkDiskMapperRef) *HeadAndOOOIndexReader { hr := &headIndexReader{ head: head, mint: mint, maxt: maxt, } - return &HeadAndOOOIndexReader{hr, lastGarbageCollectedMmapRef} + return &HeadAndOOOIndexReader{hr, inoMint, lastGarbageCollectedMmapRef} } func (oh *HeadAndOOOIndexReader) Series(ref storage.SeriesRef, builder *labels.ScratchBuilder, chks *[]chunks.Meta) error { @@ -76,9 +77,9 @@ func (oh *HeadAndOOOIndexReader) Series(ref storage.SeriesRef, builder *labels.S *chks = (*chks)[:0] if s.ooo != nil { - return getOOOSeriesChunks(s, oh.mint, oh.maxt, oh.lastGarbageCollectedMmapRef, 0, true, chks) + return getOOOSeriesChunks(s, oh.mint, oh.maxt, oh.lastGarbageCollectedMmapRef, 0, true, oh.inoMint, chks) } - *chks = appendSeriesChunks(s, oh.mint, oh.maxt, *chks) + *chks = appendSeriesChunks(s, oh.inoMint, oh.maxt, *chks) return nil } @@ -87,7 +88,7 @@ func (oh *HeadAndOOOIndexReader) Series(ref storage.SeriesRef, builder *labels.S // // maxMmapRef tells upto what max m-map chunk that we can consider. If it is non-0, then // the oooHeadChunk will not be considered. 
-func getOOOSeriesChunks(s *memSeries, mint, maxt int64, lastGarbageCollectedMmapRef, maxMmapRef chunks.ChunkDiskMapperRef, includeInOrder bool, chks *[]chunks.Meta) error { +func getOOOSeriesChunks(s *memSeries, mint, maxt int64, lastGarbageCollectedMmapRef, maxMmapRef chunks.ChunkDiskMapperRef, includeInOrder bool, inoMint int64, chks *[]chunks.Meta) error { tmpChks := make([]chunks.Meta, 0, len(s.ooo.oooMmappedChunks)) addChunk := func(minT, maxT int64, ref chunks.ChunkRef, chunk chunkenc.Chunk) { @@ -128,7 +129,7 @@ func getOOOSeriesChunks(s *memSeries, mint, maxt int64, lastGarbageCollectedMmap } if includeInOrder { - tmpChks = appendSeriesChunks(s, mint, maxt, tmpChks) + tmpChks = appendSeriesChunks(s, inoMint, maxt, tmpChks) } // There is nothing to do if we did not collect any chunk. @@ -476,7 +477,7 @@ func (ir *OOOCompactionHeadIndexReader) Series(ref storage.SeriesRef, builder *l return nil } - return getOOOSeriesChunks(s, ir.ch.mint, ir.ch.maxt, 0, ir.ch.lastMmapRef, false, chks) + return getOOOSeriesChunks(s, ir.ch.mint, ir.ch.maxt, 0, ir.ch.lastMmapRef, false, 0, chks) } func (ir *OOOCompactionHeadIndexReader) SortedLabelValues(_ context.Context, name string, matchers ...*labels.Matcher) ([]string, error) { @@ -516,7 +517,7 @@ type HeadAndOOOQuerier struct { querier storage.Querier // Used for LabelNames, LabelValues, but may be nil if head was truncated in the mean time, in which case we ignore it and not close it in the end. 
} -func NewHeadAndOOOQuerier(mint, maxt int64, head *Head, oooIsoState *oooIsolationState, querier storage.Querier) storage.Querier { +func NewHeadAndOOOQuerier(inoMint, mint, maxt int64, head *Head, oooIsoState *oooIsolationState, querier storage.Querier) storage.Querier { cr := &headChunkReader{ head: head, mint: mint, @@ -527,7 +528,7 @@ func NewHeadAndOOOQuerier(mint, maxt int64, head *Head, oooIsoState *oooIsolatio mint: mint, maxt: maxt, head: head, - index: NewHeadAndOOOIndexReader(head, mint, maxt, oooIsoState.minRef), + index: NewHeadAndOOOIndexReader(head, inoMint, mint, maxt, oooIsoState.minRef), chunkr: NewHeadAndOOOChunkReader(head, mint, maxt, cr, oooIsoState, 0), querier: querier, } @@ -568,7 +569,7 @@ type HeadAndOOOChunkQuerier struct { querier storage.ChunkQuerier } -func NewHeadAndOOOChunkQuerier(mint, maxt int64, head *Head, oooIsoState *oooIsolationState, querier storage.ChunkQuerier) storage.ChunkQuerier { +func NewHeadAndOOOChunkQuerier(inoMint, mint, maxt int64, head *Head, oooIsoState *oooIsolationState, querier storage.ChunkQuerier) storage.ChunkQuerier { cr := &headChunkReader{ head: head, mint: mint, @@ -579,7 +580,7 @@ func NewHeadAndOOOChunkQuerier(mint, maxt int64, head *Head, oooIsoState *oooIso mint: mint, maxt: maxt, head: head, - index: NewHeadAndOOOIndexReader(head, mint, maxt, oooIsoState.minRef), + index: NewHeadAndOOOIndexReader(head, inoMint, mint, maxt, oooIsoState.minRef), chunkr: NewHeadAndOOOChunkReader(head, mint, maxt, cr, oooIsoState, 0), querier: querier, } diff --git a/tsdb/ooo_head_read_test.go b/tsdb/ooo_head_read_test.go index b5944f6c8a..8d1527e05c 100644 --- a/tsdb/ooo_head_read_test.go +++ b/tsdb/ooo_head_read_test.go @@ -360,7 +360,7 @@ func TestOOOHeadIndexReader_Series(t *testing.T) { }) } - ir := NewHeadAndOOOIndexReader(h, tc.queryMinT, tc.queryMaxT, 0) + ir := NewHeadAndOOOIndexReader(h, tc.queryMinT, tc.queryMinT, tc.queryMaxT, 0) var chks []chunks.Meta var b labels.ScratchBuilder @@ -451,7 +451,7 @@ 
func testOOOHeadChunkReader_LabelValues(t *testing.T, scenario sampleTypeScenari for _, tc := range cases { t.Run(tc.name, func(t *testing.T) { // We first want to test using a head index reader that covers the biggest query interval - oh := NewHeadAndOOOIndexReader(head, tc.queryMinT, tc.queryMaxT, 0) + oh := NewHeadAndOOOIndexReader(head, tc.queryMinT, tc.queryMinT, tc.queryMaxT, 0) matchers := []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "foo", "bar1")} values, err := oh.LabelValues(ctx, "foo", matchers...) sort.Strings(values) @@ -857,7 +857,7 @@ func testOOOHeadChunkReader_Chunk(t *testing.T, scenario sampleTypeScenario) { // The Series method populates the chunk metas, taking a copy of the // head OOO chunk if necessary. These are then used by the ChunkReader. - ir := NewHeadAndOOOIndexReader(db.head, tc.queryMinT, tc.queryMaxT, 0) + ir := NewHeadAndOOOIndexReader(db.head, tc.queryMinT, tc.queryMinT, tc.queryMaxT, 0) var chks []chunks.Meta var b labels.ScratchBuilder err = ir.Series(s1Ref, &b, &chks) @@ -1028,7 +1028,7 @@ func testOOOHeadChunkReader_Chunk_ConsistentQueryResponseDespiteOfHeadExpanding( // The Series method populates the chunk metas, taking a copy of the // head OOO chunk if necessary. These are then used by the ChunkReader. 
- ir := NewHeadAndOOOIndexReader(db.head, tc.queryMinT, tc.queryMaxT, 0) + ir := NewHeadAndOOOIndexReader(db.head, tc.queryMinT, tc.queryMinT, tc.queryMaxT, 0) var chks []chunks.Meta var b labels.ScratchBuilder err = ir.Series(s1Ref, &b, &chks) diff --git a/tsdb/querier_test.go b/tsdb/querier_test.go index 9ec807f803..77772937a7 100644 --- a/tsdb/querier_test.go +++ b/tsdb/querier_test.go @@ -3235,7 +3235,7 @@ func BenchmarkQueries(b *testing.B) { qHead, err := NewBlockQuerier(NewRangeHead(head, 1, nSamples), 1, nSamples) require.NoError(b, err) isoState := head.oooIso.TrackReadAfter(0) - qOOOHead := NewHeadAndOOOQuerier(1, nSamples, head, isoState, qHead) + qOOOHead := NewHeadAndOOOQuerier(1, 1, nSamples, head, isoState, qHead) queryTypes = append(queryTypes, qt{ fmt.Sprintf("_Head_oooPercent:%d", oooPercentage), qOOOHead, From 6f0d6038b7f9430f023f3cddd9ea5b5e5e3c03cb Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Fri, 20 Sep 2024 17:40:17 +0100 Subject: [PATCH 039/137] [BUGFIX] TSDB: Only query chunks up to truncation time (#14948) If the query overlaps the range currently undergoing compaction, we should only fetch chunks up to that time. Need to store that min time in `HeadAndOOOIndexReader`. 
Signed-off-by: Bryan Boreham --- tsdb/db.go | 8 ++++++-- tsdb/ooo_head_read.go | 23 ++++++++++++----------- tsdb/ooo_head_read_test.go | 8 ++++---- tsdb/querier_test.go | 2 +- 4 files changed, 23 insertions(+), 18 deletions(-) diff --git a/tsdb/db.go b/tsdb/db.go index a5b3a5e602..2d2759b61c 100644 --- a/tsdb/db.go +++ b/tsdb/db.go @@ -2043,6 +2043,7 @@ func (db *DB) Querier(mint, maxt int64) (_ storage.Querier, err error) { overlapsOOO := overlapsClosedInterval(mint, maxt, db.head.MinOOOTime(), db.head.MaxOOOTime()) var headQuerier storage.Querier + inoMint := mint if maxt >= db.head.MinTime() || overlapsOOO { rh := NewRangeHead(db.head, mint, maxt) var err error @@ -2067,13 +2068,14 @@ func (db *DB) Querier(mint, maxt int64) (_ storage.Querier, err error) { if err != nil { return nil, fmt.Errorf("open block querier for head while getting new querier %s: %w", rh, err) } + inoMint = newMint } } if overlapsOOO { // We need to fetch from in-order and out-of-order chunks: wrap the headQuerier. 
isoState := db.head.oooIso.TrackReadAfter(db.lastGarbageCollectedMmapRef) - headQuerier = NewHeadAndOOOQuerier(mint, maxt, db.head, isoState, headQuerier) + headQuerier = NewHeadAndOOOQuerier(inoMint, mint, maxt, db.head, isoState, headQuerier) } if headQuerier != nil { @@ -2119,6 +2121,7 @@ func (db *DB) blockChunkQuerierForRange(mint, maxt int64) (_ []storage.ChunkQuer overlapsOOO := overlapsClosedInterval(mint, maxt, db.head.MinOOOTime(), db.head.MaxOOOTime()) var headQuerier storage.ChunkQuerier + inoMint := mint if maxt >= db.head.MinTime() || overlapsOOO { rh := NewRangeHead(db.head, mint, maxt) headQuerier, err = db.blockChunkQuerierFunc(rh, mint, maxt) @@ -2142,13 +2145,14 @@ func (db *DB) blockChunkQuerierForRange(mint, maxt int64) (_ []storage.ChunkQuer if err != nil { return nil, fmt.Errorf("open querier for head while getting new querier %s: %w", rh, err) } + inoMint = newMint } } if overlapsOOO { // We need to fetch from in-order and out-of-order chunks: wrap the headQuerier. isoState := db.head.oooIso.TrackReadAfter(db.lastGarbageCollectedMmapRef) - headQuerier = NewHeadAndOOOChunkQuerier(mint, maxt, db.head, isoState, headQuerier) + headQuerier = NewHeadAndOOOChunkQuerier(inoMint, mint, maxt, db.head, isoState, headQuerier) } if headQuerier != nil { diff --git a/tsdb/ooo_head_read.go b/tsdb/ooo_head_read.go index 66ae93325d..6b54aeb7d5 100644 --- a/tsdb/ooo_head_read.go +++ b/tsdb/ooo_head_read.go @@ -35,6 +35,7 @@ var _ IndexReader = &HeadAndOOOIndexReader{} type HeadAndOOOIndexReader struct { *headIndexReader // A reference to the headIndexReader so we can reuse as many interface implementation as possible. 
+ inoMint int64 lastGarbageCollectedMmapRef chunks.ChunkDiskMapperRef } @@ -49,13 +50,13 @@ func (o mergedOOOChunks) Iterator(iterator chunkenc.Iterator) chunkenc.Iterator return storage.ChainSampleIteratorFromIterables(iterator, o.chunkIterables) } -func NewHeadAndOOOIndexReader(head *Head, mint, maxt int64, lastGarbageCollectedMmapRef chunks.ChunkDiskMapperRef) *HeadAndOOOIndexReader { +func NewHeadAndOOOIndexReader(head *Head, inoMint, mint, maxt int64, lastGarbageCollectedMmapRef chunks.ChunkDiskMapperRef) *HeadAndOOOIndexReader { hr := &headIndexReader{ head: head, mint: mint, maxt: maxt, } - return &HeadAndOOOIndexReader{hr, lastGarbageCollectedMmapRef} + return &HeadAndOOOIndexReader{hr, inoMint, lastGarbageCollectedMmapRef} } func (oh *HeadAndOOOIndexReader) Series(ref storage.SeriesRef, builder *labels.ScratchBuilder, chks *[]chunks.Meta) error { @@ -76,9 +77,9 @@ func (oh *HeadAndOOOIndexReader) Series(ref storage.SeriesRef, builder *labels.S *chks = (*chks)[:0] if s.ooo != nil { - return getOOOSeriesChunks(s, oh.mint, oh.maxt, oh.lastGarbageCollectedMmapRef, 0, true, chks) + return getOOOSeriesChunks(s, oh.mint, oh.maxt, oh.lastGarbageCollectedMmapRef, 0, true, oh.inoMint, chks) } - *chks = appendSeriesChunks(s, oh.mint, oh.maxt, *chks) + *chks = appendSeriesChunks(s, oh.inoMint, oh.maxt, *chks) return nil } @@ -87,7 +88,7 @@ func (oh *HeadAndOOOIndexReader) Series(ref storage.SeriesRef, builder *labels.S // // maxMmapRef tells upto what max m-map chunk that we can consider. If it is non-0, then // the oooHeadChunk will not be considered. 
-func getOOOSeriesChunks(s *memSeries, mint, maxt int64, lastGarbageCollectedMmapRef, maxMmapRef chunks.ChunkDiskMapperRef, includeInOrder bool, chks *[]chunks.Meta) error { +func getOOOSeriesChunks(s *memSeries, mint, maxt int64, lastGarbageCollectedMmapRef, maxMmapRef chunks.ChunkDiskMapperRef, includeInOrder bool, inoMint int64, chks *[]chunks.Meta) error { tmpChks := make([]chunks.Meta, 0, len(s.ooo.oooMmappedChunks)) addChunk := func(minT, maxT int64, ref chunks.ChunkRef, chunk chunkenc.Chunk) { @@ -128,7 +129,7 @@ func getOOOSeriesChunks(s *memSeries, mint, maxt int64, lastGarbageCollectedMmap } if includeInOrder { - tmpChks = appendSeriesChunks(s, mint, maxt, tmpChks) + tmpChks = appendSeriesChunks(s, inoMint, maxt, tmpChks) } // There is nothing to do if we did not collect any chunk. @@ -476,7 +477,7 @@ func (ir *OOOCompactionHeadIndexReader) Series(ref storage.SeriesRef, builder *l return nil } - return getOOOSeriesChunks(s, ir.ch.mint, ir.ch.maxt, 0, ir.ch.lastMmapRef, false, chks) + return getOOOSeriesChunks(s, ir.ch.mint, ir.ch.maxt, 0, ir.ch.lastMmapRef, false, 0, chks) } func (ir *OOOCompactionHeadIndexReader) SortedLabelValues(_ context.Context, name string, matchers ...*labels.Matcher) ([]string, error) { @@ -516,7 +517,7 @@ type HeadAndOOOQuerier struct { querier storage.Querier // Used for LabelNames, LabelValues, but may be nil if head was truncated in the mean time, in which case we ignore it and not close it in the end. 
} -func NewHeadAndOOOQuerier(mint, maxt int64, head *Head, oooIsoState *oooIsolationState, querier storage.Querier) storage.Querier { +func NewHeadAndOOOQuerier(inoMint, mint, maxt int64, head *Head, oooIsoState *oooIsolationState, querier storage.Querier) storage.Querier { cr := &headChunkReader{ head: head, mint: mint, @@ -527,7 +528,7 @@ func NewHeadAndOOOQuerier(mint, maxt int64, head *Head, oooIsoState *oooIsolatio mint: mint, maxt: maxt, head: head, - index: NewHeadAndOOOIndexReader(head, mint, maxt, oooIsoState.minRef), + index: NewHeadAndOOOIndexReader(head, inoMint, mint, maxt, oooIsoState.minRef), chunkr: NewHeadAndOOOChunkReader(head, mint, maxt, cr, oooIsoState, 0), querier: querier, } @@ -568,7 +569,7 @@ type HeadAndOOOChunkQuerier struct { querier storage.ChunkQuerier } -func NewHeadAndOOOChunkQuerier(mint, maxt int64, head *Head, oooIsoState *oooIsolationState, querier storage.ChunkQuerier) storage.ChunkQuerier { +func NewHeadAndOOOChunkQuerier(inoMint, mint, maxt int64, head *Head, oooIsoState *oooIsolationState, querier storage.ChunkQuerier) storage.ChunkQuerier { cr := &headChunkReader{ head: head, mint: mint, @@ -579,7 +580,7 @@ func NewHeadAndOOOChunkQuerier(mint, maxt int64, head *Head, oooIsoState *oooIso mint: mint, maxt: maxt, head: head, - index: NewHeadAndOOOIndexReader(head, mint, maxt, oooIsoState.minRef), + index: NewHeadAndOOOIndexReader(head, inoMint, mint, maxt, oooIsoState.minRef), chunkr: NewHeadAndOOOChunkReader(head, mint, maxt, cr, oooIsoState, 0), querier: querier, } diff --git a/tsdb/ooo_head_read_test.go b/tsdb/ooo_head_read_test.go index 40e37043b8..b9f2133eaf 100644 --- a/tsdb/ooo_head_read_test.go +++ b/tsdb/ooo_head_read_test.go @@ -360,7 +360,7 @@ func TestOOOHeadIndexReader_Series(t *testing.T) { }) } - ir := NewHeadAndOOOIndexReader(h, tc.queryMinT, tc.queryMaxT, 0) + ir := NewHeadAndOOOIndexReader(h, tc.queryMinT, tc.queryMinT, tc.queryMaxT, 0) var chks []chunks.Meta var b labels.ScratchBuilder @@ -450,7 +450,7 @@ 
func testOOOHeadChunkReader_LabelValues(t *testing.T, scenario sampleTypeScenari for _, tc := range cases { t.Run(tc.name, func(t *testing.T) { // We first want to test using a head index reader that covers the biggest query interval - oh := NewHeadAndOOOIndexReader(head, tc.queryMinT, tc.queryMaxT, 0) + oh := NewHeadAndOOOIndexReader(head, tc.queryMinT, tc.queryMinT, tc.queryMaxT, 0) matchers := []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "foo", "bar1")} values, err := oh.LabelValues(ctx, "foo", matchers...) sort.Strings(values) @@ -854,7 +854,7 @@ func testOOOHeadChunkReader_Chunk(t *testing.T, scenario sampleTypeScenario) { // The Series method populates the chunk metas, taking a copy of the // head OOO chunk if necessary. These are then used by the ChunkReader. - ir := NewHeadAndOOOIndexReader(db.head, tc.queryMinT, tc.queryMaxT, 0) + ir := NewHeadAndOOOIndexReader(db.head, tc.queryMinT, tc.queryMinT, tc.queryMaxT, 0) var chks []chunks.Meta var b labels.ScratchBuilder err = ir.Series(s1Ref, &b, &chks) @@ -1023,7 +1023,7 @@ func testOOOHeadChunkReader_Chunk_ConsistentQueryResponseDespiteOfHeadExpanding( // The Series method populates the chunk metas, taking a copy of the // head OOO chunk if necessary. These are then used by the ChunkReader. 
- ir := NewHeadAndOOOIndexReader(db.head, tc.queryMinT, tc.queryMaxT, 0) + ir := NewHeadAndOOOIndexReader(db.head, tc.queryMinT, tc.queryMinT, tc.queryMaxT, 0) var chks []chunks.Meta var b labels.ScratchBuilder err = ir.Series(s1Ref, &b, &chks) diff --git a/tsdb/querier_test.go b/tsdb/querier_test.go index 50525f65f4..858b707932 100644 --- a/tsdb/querier_test.go +++ b/tsdb/querier_test.go @@ -3170,7 +3170,7 @@ func BenchmarkQueries(b *testing.B) { qHead, err := NewBlockQuerier(NewRangeHead(head, 1, nSamples), 1, nSamples) require.NoError(b, err) isoState := head.oooIso.TrackReadAfter(0) - qOOOHead := NewHeadAndOOOQuerier(1, nSamples, head, isoState, qHead) + qOOOHead := NewHeadAndOOOQuerier(1, 1, nSamples, head, isoState, qHead) queryTypes = append(queryTypes, qt{ fmt.Sprintf("_Head_oooPercent:%d", oooPercentage), qOOOHead, From e3617cbd2cd48801a4c6a9e24b6e6ef8c610b879 Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Fri, 20 Sep 2024 17:47:37 +0100 Subject: [PATCH 040/137] Add #14948 to CHANGELOG Also update the date of the RC which hasn't gone out yet. Signed-off-by: Bryan Boreham --- CHANGELOG.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 65642d37c7..a84526edd7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,7 +2,7 @@ ## unreleased -## 2.55.0-rc.0 / 2024-09-17 +## 2.55.0-rc.0 / 2024-09-20 * [FEATURE] Support UTF-8 characters in label names - feature flag `utf8-names`. #14482, #14880, #14736, #14727 * [FEATURE] Support config reload automatically - feature flag `auto-reload-config`. #14769 @@ -23,7 +23,7 @@ * [ENHANCEMENT] PromQL: Use Kahan summation for better accuracy in `avg` and `avg_over_time`. #14413 * [ENHANCEMENT] TSDB: Backward compatibility with upcoming index v3. #14934 * [PERF] Remote-Read: Support streaming mode. #11379 -* [PERF] TSDB: Query in-order and out-of-order series together. #14354, #14693, #14714, #14831, #14874 +* [PERF] TSDB: Query in-order and out-of-order series together. 
#14354, #14693, #14714, #14831, #14874, #14948 * [PERF] TSDB: Streamline reading of overlapping out-of-order head chunks. #14729 * [BUGFIX] SD: Fix dropping targets (with feature flag `new-service-discovery-manager`). #13147 * [BUGFIX] SD: Stop storing stale targets (with feature flag `new-service-discovery-manager`). #13622 From 31c57605518d880c51328bc4ce419063a9341685 Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Sat, 21 Sep 2024 11:19:21 +0100 Subject: [PATCH 041/137] Neater string vs byte-slice conversions (#14425) unsafe.Slice and unsafe.StringData were added in Go 1.20 Signed-off-by: Bryan Boreham --- model/labels/labels_common.go | 2 +- model/labels/labels_stringlabels.go | 11 ++++------- model/textparse/promparse.go | 2 +- tsdb/encoding/encoding.go | 4 +--- tsdb/index/index.go | 2 +- 5 files changed, 8 insertions(+), 13 deletions(-) diff --git a/model/labels/labels_common.go b/model/labels/labels_common.go index d7bdc1e076..99529a3836 100644 --- a/model/labels/labels_common.go +++ b/model/labels/labels_common.go @@ -230,5 +230,5 @@ func contains(s []Label, n string) bool { } func yoloString(b []byte) string { - return *((*string)(unsafe.Pointer(&b))) + return unsafe.String(unsafe.SliceData(b), len(b)) } diff --git a/model/labels/labels_stringlabels.go b/model/labels/labels_stringlabels.go index c8bce51234..c64bb990e0 100644 --- a/model/labels/labels_stringlabels.go +++ b/model/labels/labels_stringlabels.go @@ -16,7 +16,6 @@ package labels import ( - "reflect" "slices" "strings" "unsafe" @@ -299,10 +298,8 @@ func Equal(ls, o Labels) bool { func EmptyLabels() Labels { return Labels{} } -func yoloBytes(s string) (b []byte) { - *(*string)(unsafe.Pointer(&b)) = s - (*reflect.SliceHeader)(unsafe.Pointer(&b)).Cap = len(s) - return +func yoloBytes(s string) []byte { + return unsafe.Slice(unsafe.StringData(s), len(s)) } // New returns a sorted Labels from the given labels. 
@@ -338,8 +335,8 @@ func Compare(a, b Labels) int { } i := 0 // First, go 8 bytes at a time. Data strings are expected to be 8-byte aligned. - sp := unsafe.Pointer((*reflect.StringHeader)(unsafe.Pointer(&shorter)).Data) - lp := unsafe.Pointer((*reflect.StringHeader)(unsafe.Pointer(&longer)).Data) + sp := unsafe.Pointer(unsafe.StringData(shorter)) + lp := unsafe.Pointer(unsafe.StringData(longer)) for ; i < len(shorter)-8; i += 8 { if *(*uint64)(unsafe.Add(sp, i)) != *(*uint64)(unsafe.Add(lp, i)) { break diff --git a/model/textparse/promparse.go b/model/textparse/promparse.go index a611f3aea7..5759769279 100644 --- a/model/textparse/promparse.go +++ b/model/textparse/promparse.go @@ -502,7 +502,7 @@ func unreplace(s string) string { } func yoloString(b []byte) string { - return *((*string)(unsafe.Pointer(&b))) + return unsafe.String(unsafe.SliceData(b), len(b)) } func parseFloat(s string) (float64, error) { diff --git a/tsdb/encoding/encoding.go b/tsdb/encoding/encoding.go index a7ce4e81e0..cc7d0990f6 100644 --- a/tsdb/encoding/encoding.go +++ b/tsdb/encoding/encoding.go @@ -20,7 +20,6 @@ import ( "hash" "hash/crc32" "math" - "unsafe" "github.com/dennwc/varint" ) @@ -75,8 +74,7 @@ func (e *Encbuf) PutVarint64(x int64) { // PutUvarintStr writes a string to the buffer prefixed by its varint length (in bytes!). 
func (e *Encbuf) PutUvarintStr(s string) { - b := *(*[]byte)(unsafe.Pointer(&s)) - e.PutUvarint(len(b)) + e.PutUvarint(len(s)) e.PutString(s) } diff --git a/tsdb/index/index.go b/tsdb/index/index.go index 0e0e353719..ba262182c8 100644 --- a/tsdb/index/index.go +++ b/tsdb/index/index.go @@ -2063,5 +2063,5 @@ func (dec *Decoder) Series(b []byte, builder *labels.ScratchBuilder, chks *[]chu } func yoloString(b []byte) string { - return *((*string)(unsafe.Pointer(&b))) + return unsafe.String(unsafe.SliceData(b), len(b)) } From e3f5c7c2a07995adc9d73d2e7ae483e39dbca7ca Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Sun, 22 Sep 2024 17:42:04 +0100 Subject: [PATCH 042/137] [Release 2.55] Update CHANGELOG Make text more consistent with 3.0 branch Signed-off-by: Bryan Boreham --- CHANGELOG.md | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index a84526edd7..64618d552e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,32 +9,30 @@ * [FEATURE] Scraping: Add the ability to set custom `http_headers` in config. #14817 * [FEATURE] Scraping: Support feature flag `created-timestamp-zero-ingestion` in OpenMetrics. #14356, #14815 * [FEATURE] Scraping: `scrape_failure_log_file` option to log failures to a file. #14734 -* [FEATURE] PromQL: Delay removal of `__name__` label - feature flag `promql-delayed-name-removal`. #14477 * [FEATURE] OTLP receiver: Optional promotion of resource attributes to series labels. #14200 -* [FEATURE] API: Support multiple `--web.listen-address`. #14665 * [FEATURE] Remote-Write: Support Google Cloud Monitoring authorization. #14346 * [FEATURE] Promtool: `tsdb create-blocks` new option to add labels. #14403 * [FEATURE] Promtool: `promtool test` adds `--junit` flag to format results. #14506 * [ENHANCEMENT] OTLP receiver: Warn on exponential histograms with zero count and non-zero sum. #14706 * [ENHANCEMENT] OTLP receiver: Interrupt translation on context cancellation/timeout. 
#14612 -* [ENHANCEMENT] PromQL: Improve detail in distributed tracing. #14816 +* [ENHANCEMENT] Remote Read client: Enable streaming remote read if the server supports it. #11379 +* [ENHANCEMENT] Remote-Write: Don't reshard if we haven't successfully sent a sample since last update. #14450 +* [ENHANCEMENT] PromQL: Delay deletion of `__name__` label to the end of the query evaluation. This is **experimental** and enabled under the feature-flag `promql-delayed-name-removal`. #14477 * [ENHANCEMENT] PromQL: Experimental `sort_by_label` and `sort_by_label_desc` sort by all labels when label is equal. #14655 * [ENHANCEMENT] PromQL: Clarify error message logged when Go runtime panic occurs during query evaluation. #14621 * [ENHANCEMENT] PromQL: Use Kahan summation for better accuracy in `avg` and `avg_over_time`. #14413 +* [ENHANCEMENT] Tracing: Improve PromQL tracing, including showing the operation performed for aggregates, operators, and calls. #14816 +* [ENHANCEMENT] API: Support multiple listening addresses. #14665 * [ENHANCEMENT] TSDB: Backward compatibility with upcoming index v3. #14934 -* [PERF] Remote-Read: Support streaming mode. #11379 * [PERF] TSDB: Query in-order and out-of-order series together. #14354, #14693, #14714, #14831, #14874, #14948 * [PERF] TSDB: Streamline reading of overlapping out-of-order head chunks. #14729 * [BUGFIX] SD: Fix dropping targets (with feature flag `new-service-discovery-manager`). #13147 * [BUGFIX] SD: Stop storing stale targets (with feature flag `new-service-discovery-manager`). #13622 -* [BUGFIX] fix(utf8): propagate validationScheme config to scraping options. #14880 -* [BUGFIX] PromQL: Experimental Native Histograms: Do not re-use spans between histograms. #14771 * [BUGFIX] Scraping: exemplars could be dropped in protobuf scraping. #14810 * [BUGFIX] Remote-Write: fix metadata sending for experimental Remote-Write V2. #14766 * [BUGFIX] Remote-Write: Return 4xx not 5xx when timeseries has duplicate label. 
#14716 -* [BUGFIX] Experimental Native Histograms: many fixes for incorrect results, panics, warnings. #14598, #14611, #14609, #14575, #14513, #14821 +* [BUGFIX] Experimental Native Histograms: many fixes for incorrect results, panics, warnings. #14513, #14575, #14598, #14609, #14611, #14771, #14821 * [BUGFIX] TSDB: Only count unknown record types in `record_decode_failures_total` metric. #14042 -* [ENHANCEMENT] Remote-Write: Don't reshard if we haven't successfully sent a sample since last update. #14450 ## 2.54.1 / 2024-08-27 From 4c90118361d562f0272a29581ae37fa652d62975 Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Sun, 22 Sep 2024 17:53:41 +0100 Subject: [PATCH 043/137] Remove CHANGELOG duplicate line Signed-off-by: Bryan Boreham #14402 is the issue and #14403 is the fix. --- CHANGELOG.md | 1 - 1 file changed, 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 53406c3286..7fbdadfa62 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -21,7 +21,6 @@ As is traditional with a beta release, we do **not** recommend users install 3.0 * [CHANGE] Agent mode has been promoted to stable. The feature flag `agent` has been removed. To run Prometheus in Agent mode, use the new `--agent` cmdline arg instead. #14747 * [CHANGE] Remove deprecated `remote-write-receiver`,`promql-at-modifier`, and `promql-negative-offset` feature flags. #13456, #14526 * [CHANGE] Remove deprecated `storage.tsdb.allow-overlapping-blocks`, `alertmanager.timeout`, and `storage.tsdb.retention` flags. #14640, #14643 -* [FEATURE] Promtool: Allow additional labels to be added to blocks created from openmetrics. #14402 * [ENHANCEMENT] Move AM discovery page from "Monitoring status" to "Server status". #14875 * [BUGFIX] Scrape: Do not override target parameter labels with config params. 
#11029 From f179cb948bcd4c36cfc75e73dad709c8b9d50848 Mon Sep 17 00:00:00 2001 From: George Krajcsovits Date: Mon, 23 Sep 2024 13:46:51 +0200 Subject: [PATCH 044/137] chore: bump client_golang from 1.20.3 to 1.20.4 (#14963) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: György Krajcsovits --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 269ddc57ec..c3f6bbe749 100644 --- a/go.mod +++ b/go.mod @@ -52,7 +52,7 @@ require ( github.com/oklog/ulid v1.3.1 github.com/ovh/go-ovh v1.6.0 github.com/prometheus/alertmanager v0.27.0 - github.com/prometheus/client_golang v1.20.3 + github.com/prometheus/client_golang v1.20.4 github.com/prometheus/client_model v0.6.1 github.com/prometheus/common v0.59.1 github.com/prometheus/common/assets v0.2.0 diff --git a/go.sum b/go.sum index 63626288c8..6f31bec93f 100644 --- a/go.sum +++ b/go.sum @@ -614,8 +614,8 @@ github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeD github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.20.3 h1:oPksm4K8B+Vt35tUhw6GbSNSgVlVSBH0qELP/7u83l4= -github.com/prometheus/client_golang v1.20.3/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= +github.com/prometheus/client_golang v1.20.4 h1:Tgh3Yr67PaOv/uTqloMsCEdeuFTatm5zIq5+qNN23vI= +github.com/prometheus/client_golang v1.20.4/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= 
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= From 3f9b869fb57632f464942c8c08705d8e0ee0a695 Mon Sep 17 00:00:00 2001 From: Arve Knudsen Date: Sun, 22 Sep 2024 19:59:37 +0100 Subject: [PATCH 045/137] Fix react-app (old UI) package-lock.json cd web/ui/react-app npm install Signed-off-by: Arve Knudsen --- web/ui/react-app/package-lock.json | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/web/ui/react-app/package-lock.json b/web/ui/react-app/package-lock.json index d456ca1f09..667eb0b375 100644 --- a/web/ui/react-app/package-lock.json +++ b/web/ui/react-app/package-lock.json @@ -1,12 +1,12 @@ { "name": "@prometheus-io/app", - "version": "0.54.1", + "version": "0.55.0-rc.0", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "@prometheus-io/app", - "version": "0.54.1", + "version": "0.55.0-rc.0", "dependencies": { "@codemirror/autocomplete": "^6.17.0", "@codemirror/commands": "^6.6.0", @@ -24,7 +24,7 @@ "@lezer/lr": "^1.4.2", "@nexucis/fuzzy": "^0.4.1", "@nexucis/kvsearch": "^0.8.1", - "@prometheus-io/codemirror-promql": "0.54.1", + "@prometheus-io/codemirror-promql": "0.55.0-rc.0", "bootstrap": "^4.6.2", "css.escape": "^1.5.1", "downshift": "^9.0.6", @@ -4341,12 +4341,12 @@ } }, "node_modules/@prometheus-io/codemirror-promql": { - "version": "0.54.1", - "resolved": "https://registry.npmjs.org/@prometheus-io/codemirror-promql/-/codemirror-promql-0.54.1.tgz", - "integrity": "sha512-CkU5d+Nhbj+VjTYSlicIcFeL3KUYyEco/VHK+qM4TXgPQJxP04MCi642UVgLeuy9exThkCObj5oDJcApSNmxBw==", + "version": "0.55.0-rc.0", + "resolved": "https://registry.npmjs.org/@prometheus-io/codemirror-promql/-/codemirror-promql-0.55.0-rc.0.tgz", + "integrity": "sha512-BlDKH2eB8Sd9bQmQjvJvncvZ+VTtrtReSO6qWZXULyrXp+FEjONybOH3Ejq/0a2hat0GpZzcEfwKqPbdy4WdCQ==", "license": "Apache-2.0", "dependencies": { - "@prometheus-io/lezer-promql": "0.54.1", + "@prometheus-io/lezer-promql": 
"0.55.0-rc.0", "lru-cache": "^7.18.3" }, "engines": { @@ -4362,9 +4362,9 @@ } }, "node_modules/@prometheus-io/lezer-promql": { - "version": "0.54.1", - "resolved": "https://registry.npmjs.org/@prometheus-io/lezer-promql/-/lezer-promql-0.54.1.tgz", - "integrity": "sha512-+QdeoN/PttM1iBeRtwSQWoaDIwnIgT9oIueTbAlvL01WM2eluD8j9vNiD0oJFzbcZ5clxwhvMP54InIt3vJaMg==", + "version": "0.55.0-rc.0", + "resolved": "https://registry.npmjs.org/@prometheus-io/lezer-promql/-/lezer-promql-0.55.0-rc.0.tgz", + "integrity": "sha512-Ikaabw8gfu0HI2D2rKykLBWio+ytTEE03bdZDMpILYULoeGVPdKgbeGLLI9Kafyv48Qiis55o60EfDoywiRHqA==", "license": "Apache-2.0", "peerDependencies": { "@lezer/highlight": "^1.1.2", From c2bbabb4a74d6c7415dc75ed086e7bdf70ede1b0 Mon Sep 17 00:00:00 2001 From: Arve Knudsen Date: Tue, 24 Sep 2024 12:03:56 +0200 Subject: [PATCH 046/137] promql.Engine: Refactor vector selector evaluation into a method (#14900) * PromQL.Engine: Refactor Matrix expansion into a method Add utility method promql.evaluator.expandSeriesToMatrix, for expanding a slice of storage.Series into a promql.Matrix. Signed-off-by: Arve Knudsen * Rename to generateMatrix Rename evaluator.expandSeriesToMatrix into generateMatrix, while also dropping the start, end, interval arguments since they are evaluator fields. Write more extensive method documentation. Signed-off-by: Arve Knudsen * Rename to evalVectorSelector Rename to evalVectorSelector after discussing with @michahoffmann. Signed-off-by: Arve Knudsen --------- Signed-off-by: Arve Knudsen --- promql/engine.go | 117 +++++++++++++++++++++++++++-------------------- 1 file changed, 67 insertions(+), 50 deletions(-) diff --git a/promql/engine.go b/promql/engine.go index 983e016666..e10be63783 100644 --- a/promql/engine.go +++ b/promql/engine.go @@ -1004,6 +1004,8 @@ func extractGroupsFromPath(p []parser.Node) (bool, []string) { return false, nil } +// checkAndExpandSeriesSet expands expr's UnexpandedSeriesSet into expr's Series. 
+// If the Series field is already non-nil, it's a no-op. func checkAndExpandSeriesSet(ctx context.Context, expr parser.Expr) (annotations.Annotations, error) { switch e := expr.(type) { case *parser.MatrixSelector: @@ -1455,6 +1457,70 @@ func (ev *evaluator) rangeEvalAgg(ctx context.Context, aggExpr *parser.Aggregate return result, warnings } +// evalVectorSelector generates a Matrix between ev.startTimestamp and ev.endTimestamp (inclusive), each point spaced ev.interval apart, from vs. +// vs.Series has to be expanded before calling this method. +// For every series iterator in vs.Series, the method iterates in ev.interval sized steps from ev.startTimestamp until and including ev.endTimestamp, +// collecting every corresponding sample (obtained via ev.vectorSelectorSingle) into a Series. +// All of the generated Series are collected into a Matrix, that gets returned. +func (ev *evaluator) evalVectorSelector(ctx context.Context, vs *parser.VectorSelector) Matrix { + numSteps := int((ev.endTimestamp-ev.startTimestamp)/ev.interval) + 1 + + mat := make(Matrix, 0, len(vs.Series)) + var prevSS *Series + it := storage.NewMemoizedEmptyIterator(durationMilliseconds(ev.lookbackDelta)) + var chkIter chunkenc.Iterator + for _, s := range vs.Series { + if err := contextDone(ctx, "expression evaluation"); err != nil { + ev.error(err) + } + + chkIter = s.Iterator(chkIter) + it.Reset(chkIter) + ss := Series{ + Metric: s.Labels(), + } + + for ts, step := ev.startTimestamp, -1; ts <= ev.endTimestamp; ts += ev.interval { + step++ + _, f, h, ok := ev.vectorSelectorSingle(it, vs, ts) + if !ok { + continue + } + + if h == nil { + ev.currentSamples++ + ev.samplesStats.IncrementSamplesAtStep(step, 1) + if ev.currentSamples > ev.maxSamples { + ev.error(ErrTooManySamples(env)) + } + if ss.Floats == nil { + ss.Floats = reuseOrGetFPointSlices(prevSS, numSteps) + } + ss.Floats = append(ss.Floats, FPoint{F: f, T: ts}) + } else { + point := HPoint{H: h, T: ts} + histSize := point.size() + 
ev.currentSamples += histSize + ev.samplesStats.IncrementSamplesAtStep(step, int64(histSize)) + if ev.currentSamples > ev.maxSamples { + ev.error(ErrTooManySamples(env)) + } + if ss.Histograms == nil { + ss.Histograms = reuseOrGetHPointSlices(prevSS, numSteps) + } + ss.Histograms = append(ss.Histograms, point) + } + } + + if len(ss.Floats)+len(ss.Histograms) > 0 { + mat = append(mat, ss) + prevSS = &mat[len(mat)-1] + } + } + ev.samplesStats.UpdatePeak(ev.currentSamples) + return mat +} + // evalSubquery evaluates given SubqueryExpr and returns an equivalent // evaluated MatrixSelector in its place. Note that the Name and LabelMatchers are not set. func (ev *evaluator) evalSubquery(ctx context.Context, subq *parser.SubqueryExpr) (*parser.MatrixSelector, int, annotations.Annotations) { @@ -1892,56 +1958,7 @@ func (ev *evaluator) eval(ctx context.Context, expr parser.Expr) (parser.Value, if err != nil { ev.error(errWithWarnings{fmt.Errorf("expanding series: %w", err), ws}) } - mat := make(Matrix, 0, len(e.Series)) - var prevSS *Series - it := storage.NewMemoizedEmptyIterator(durationMilliseconds(ev.lookbackDelta)) - var chkIter chunkenc.Iterator - for i, s := range e.Series { - if err := contextDone(ctx, "expression evaluation"); err != nil { - ev.error(err) - } - chkIter = s.Iterator(chkIter) - it.Reset(chkIter) - ss := Series{ - Metric: e.Series[i].Labels(), - } - - for ts, step := ev.startTimestamp, -1; ts <= ev.endTimestamp; ts += ev.interval { - step++ - _, f, h, ok := ev.vectorSelectorSingle(it, e, ts) - if ok { - if h == nil { - ev.currentSamples++ - ev.samplesStats.IncrementSamplesAtStep(step, 1) - if ev.currentSamples > ev.maxSamples { - ev.error(ErrTooManySamples(env)) - } - if ss.Floats == nil { - ss.Floats = reuseOrGetFPointSlices(prevSS, numSteps) - } - ss.Floats = append(ss.Floats, FPoint{F: f, T: ts}) - } else { - point := HPoint{H: h, T: ts} - histSize := point.size() - ev.currentSamples += histSize - ev.samplesStats.IncrementSamplesAtStep(step, 
int64(histSize)) - if ev.currentSamples > ev.maxSamples { - ev.error(ErrTooManySamples(env)) - } - if ss.Histograms == nil { - ss.Histograms = reuseOrGetHPointSlices(prevSS, numSteps) - } - ss.Histograms = append(ss.Histograms, point) - } - } - } - - if len(ss.Floats)+len(ss.Histograms) > 0 { - mat = append(mat, ss) - prevSS = &mat[len(mat)-1] - } - } - ev.samplesStats.UpdatePeak(ev.currentSamples) + mat := ev.evalVectorSelector(ctx, e) return mat, ws case *parser.MatrixSelector: From a9243d4d2c2d68ed1ad957dc810408798910fe7f Mon Sep 17 00:00:00 2001 From: beorn7 Date: Tue, 24 Sep 2024 14:55:54 +0200 Subject: [PATCH 047/137] docs: Improve, clarify, and fix documentation on scrape limits In detail: - Clarify that label name and value length limits are in byte, not in UTF-8 data points. - More consistent formatting to keep 80 characters line limet. - Clarify various misleading specifications around "per sample", "per scrape", "per scrape config", "per job"... - Fix grammar. Signed-off-by: beorn7 --- docs/configuration/configuration.md | 57 ++++++++++++++++------------- 1 file changed, 32 insertions(+), 25 deletions(-) diff --git a/docs/configuration/configuration.md b/docs/configuration/configuration.md index 2ca7ce67e4..096809397a 100644 --- a/docs/configuration/configuration.md +++ b/docs/configuration/configuration.md @@ -71,8 +71,11 @@ global: # How frequently to evaluate rules. [ evaluation_interval: | default = 1m ] - # Offset the rule evaluation timestamp of this particular group by the specified duration into the past to ensure the underlying metrics have been received. - # Metric availability delays are more likely to occur when Prometheus is running as a remote write target, but can also occur when there's anomalies with scraping. + # Offset the rule evaluation timestamp of this particular group by the + # specified duration into the past to ensure the underlying metrics have + # been received. 
Metric availability delays are more likely to occur when + # Prometheus is running as a remote write target, but can also occur when + # there's anomalies with scraping. [ rule_query_offset: | default = 0s ] # The labels to add to any time series or alerts when communicating with @@ -94,27 +97,29 @@ global: # change or be removed in the future. [ body_size_limit: | default = 0 ] - # Per-scrape limit on number of scraped samples that will be accepted. + # Per-scrape limit on the number of scraped samples that will be accepted. # If more than this number of samples are present after metric relabeling # the entire scrape will be treated as failed. 0 means no limit. [ sample_limit: | default = 0 ] - # Per-scrape limit on number of labels that will be accepted for a sample. If - # more than this number of labels are present post metric-relabeling, the - # entire scrape will be treated as failed. 0 means no limit. + # Limit on the number of labels that will be accepted per sample. If more + # than this number of labels are present on any sample post metric-relabeling, + # the entire scrape will be treated as failed. 0 means no limit. [ label_limit: | default = 0 ] - # Per-scrape limit on length of labels name that will be accepted for a sample. - # If a label name is longer than this number post metric-relabeling, the entire - # scrape will be treated as failed. 0 means no limit. + # Limit on the length (in bytes) of each individual label name. If any label + # name in a scrape is longer than this number post metric-relabeling, the + # entire scrape will be treated as failed. Note that label names are UTF-8 + # encoded, and characters can take up to 4 bytes. 0 means no limit. [ label_name_length_limit: | default = 0 ] - # Per-scrape limit on length of labels value that will be accepted for a sample. - # If a label value is longer than this number post metric-relabeling, the - # entire scrape will be treated as failed. 0 means no limit. 
+ # Limit on the length (in bytes) of each individual label value. If any label + # value in a scrape is longer than this number post metric-relabeling, the + # entire scrape will be treated as failed. Note that label values are UTF-8 + # encoded, and characters can take up to 4 bytes. 0 means no limit. [ label_value_length_limit: | default = 0 ] - # Per-scrape config limit on number of unique targets that will be + # Limit per scrape config on number of unique targets that will be # accepted. If more than this number of targets are present after target # relabeling, Prometheus will mark the targets as failed without scraping them. # 0 means no limit. This is an experimental feature, this behaviour could @@ -456,34 +461,36 @@ metric_relabel_configs: # change or be removed in the future. [ body_size_limit: | default = 0 ] -# Per-scrape limit on number of scraped samples that will be accepted. +# Per-scrape limit on the number of scraped samples that will be accepted. # If more than this number of samples are present after metric relabeling # the entire scrape will be treated as failed. 0 means no limit. [ sample_limit: | default = 0 ] -# Per-scrape limit on number of labels that will be accepted for a sample. If -# more than this number of labels are present post metric-relabeling, the -# entire scrape will be treated as failed. 0 means no limit. +# Limit on the number of labels that will be accepted per sample. If more +# than this number of labels are present on any sample post metric-relabeling, +# the entire scrape will be treated as failed. 0 means no limit. [ label_limit: | default = 0 ] -# Per-scrape limit on length of labels name that will be accepted for a sample. -# If a label name is longer than this number post metric-relabeling, the entire -# scrape will be treated as failed. 0 means no limit. +# Limit on the length (in bytes) of each individual label name. 
If any label +# name in a scrape is longer than this number post metric-relabeling, the +# entire scrape will be treated as failed. Note that label names are UTF-8 +# encoded, and characters can take up to 4 bytes. 0 means no limit. [ label_name_length_limit: | default = 0 ] -# Per-scrape limit on length of labels value that will be accepted for a sample. -# If a label value is longer than this number post metric-relabeling, the -# entire scrape will be treated as failed. 0 means no limit. +# Limit on the length (in bytes) of each individual label value. If any label +# value in a scrape is longer than this number post metric-relabeling, the +# entire scrape will be treated as failed. Note that label values are UTF-8 +# encoded, and characters can take up to 4 bytes. 0 means no limit. [ label_value_length_limit: | default = 0 ] -# Per-scrape config limit on number of unique targets that will be +# Limit per scrape config on number of unique targets that will be # accepted. If more than this number of targets are present after target # relabeling, Prometheus will mark the targets as failed without scraping them. # 0 means no limit. This is an experimental feature, this behaviour could # change in the future. [ target_limit: | default = 0 ] -# Per-job limit on the number of targets dropped by relabeling +# Limit per scrape config on the number of targets dropped by relabeling # that will be kept in memory. 0 means no limit. 
[ keep_dropped_targets: | default = 0 ] From cb4bc5e7868a93a204b8a29faa866dd9bfc7cd31 Mon Sep 17 00:00:00 2001 From: Jesus Vazquez Date: Tue, 24 Sep 2024 15:28:04 +0200 Subject: [PATCH 048/137] UI: Make mantime UI assets relative Signed-off-by: Jesus Vazquez --- web/ui/mantine-ui/vite.config.ts | 1 + 1 file changed, 1 insertion(+) diff --git a/web/ui/mantine-ui/vite.config.ts b/web/ui/mantine-ui/vite.config.ts index 079605403d..63bf6a1268 100644 --- a/web/ui/mantine-ui/vite.config.ts +++ b/web/ui/mantine-ui/vite.config.ts @@ -3,6 +3,7 @@ import react from "@vitejs/plugin-react"; // https://vitejs.dev/config/ export default defineConfig({ + base: '', plugins: [react()], server: { proxy: { From e196b977afdfd3cc72ac15de97845bec056a8a3d Mon Sep 17 00:00:00 2001 From: Oleg Zaytsev Date: Wed, 25 Sep 2024 10:38:47 +0200 Subject: [PATCH 049/137] Process MemPostings.Delete() with GOMAXPROCS workers We are still seeing lock contention on MemPostings.mtx, and MemPostings.Delete() is by far the most expensive operation on that mutex. This adds parallelism to that method, trying to reduce the amount of time we spend with the mutex held. Signed-off-by: Oleg Zaytsev --- tsdb/index/postings.go | 43 ++++++++++++++++++++++++++++++++----- tsdb/index/postings_test.go | 2 +- 2 files changed, 39 insertions(+), 6 deletions(-) diff --git a/tsdb/index/postings.go b/tsdb/index/postings.go index bfe74c323d..25780e4ad8 100644 --- a/tsdb/index/postings.go +++ b/tsdb/index/postings.go @@ -26,6 +26,7 @@ import ( "sync" "github.com/bboreham/go-loser" + "github.com/cespare/xxhash/v2" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/storage" @@ -293,6 +294,9 @@ func (p *MemPostings) Delete(deleted map[storage.SeriesRef]struct{}, affected ma p.mtx.Lock() defer p.mtx.Unlock() + // Deleting label names mutates p.m map, so it should be done from a single goroutine after nobody else is reading it. 
+ deleteLabelNames := make(chan string, len(p.m)) + process := func(l labels.Label) { orig := p.m[l.Name][l.Value] repl := make([]storage.SeriesRef, 0, len(orig)) @@ -305,17 +309,46 @@ func (p *MemPostings) Delete(deleted map[storage.SeriesRef]struct{}, affected ma p.m[l.Name][l.Value] = repl } else { delete(p.m[l.Name], l.Value) - // Delete the key if we removed all values. if len(p.m[l.Name]) == 0 { - delete(p.m, l.Name) + // Delete the key if we removed all values. + deleteLabelNames <- l.Name } } } - for l := range affected { - process(l) + // Create GOMAXPROCS workers. + wg := sync.WaitGroup{} + jobs := make([]chan labels.Label, runtime.GOMAXPROCS(0)) + for i := range jobs { + jobs[i] = make(chan labels.Label, 128) + wg.Add(1) + go func(jobs chan labels.Label) { + defer wg.Done() + for l := range jobs { + process(l) + } + }(jobs[i]) + } + + // Process all affected labels and the allPostingsKey. + for l := range affected { + j := int(xxhash.Sum64String(l.Name) % uint64(len(jobs))) + jobs[j] <- l + } + j := int(xxhash.Sum64String(allPostingsKey.Name) % uint64(len(jobs))) + jobs[j] <- allPostingsKey + + // Close jobs channels and wait all workers to finish. + for i := range jobs { + close(jobs[i]) + } + wg.Wait() + + // Close deleteLabelNames channel and delete the label names requested. + close(deleteLabelNames) + for name := range deleteLabelNames { + delete(p.m, name) } - process(allPostingsKey) } // Iter calls f for each postings list. It aborts if f returns an error and returns it. 
diff --git a/tsdb/index/postings_test.go b/tsdb/index/postings_test.go index 96c9ed124b..1802c9e891 100644 --- a/tsdb/index/postings_test.go +++ b/tsdb/index/postings_test.go @@ -1025,7 +1025,7 @@ func BenchmarkMemPostings_Delete(b *testing.B) { return s } - const total = 1e6 + const total = 2e6 allSeries := [total]labels.Labels{} nameValues := make([]string, 0, 100) for i := 0; i < total; i++ { From 9c417aa71045e36d9fad66e1e77c1d942cbacc17 Mon Sep 17 00:00:00 2001 From: Oleg Zaytsev Date: Wed, 25 Sep 2024 14:08:50 +0200 Subject: [PATCH 050/137] Fix deadlock with empty MemPostings Signed-off-by: Oleg Zaytsev --- tsdb/index/postings.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tsdb/index/postings.go b/tsdb/index/postings.go index 25780e4ad8..3164d8c2fb 100644 --- a/tsdb/index/postings.go +++ b/tsdb/index/postings.go @@ -295,7 +295,8 @@ func (p *MemPostings) Delete(deleted map[storage.SeriesRef]struct{}, affected ma defer p.mtx.Unlock() // Deleting label names mutates p.m map, so it should be done from a single goroutine after nobody else is reading it. - deleteLabelNames := make(chan string, len(p.m)) + // Adding +1 to length to account for allPostingsKey processing when MemPostings is empty. 
+ deleteLabelNames := make(chan string, len(p.m)+1) process := func(l labels.Label) { orig := p.m[l.Name][l.Value] From ccd0308abcb98505797161b9142da1fe9ddbe88c Mon Sep 17 00:00:00 2001 From: Oleg Zaytsev Date: Wed, 25 Sep 2024 14:59:16 +0200 Subject: [PATCH 051/137] Don't do anything if MemPostings are empty Signed-off-by: Oleg Zaytsev --- tsdb/index/postings.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/tsdb/index/postings.go b/tsdb/index/postings.go index 3164d8c2fb..e6a6c708ff 100644 --- a/tsdb/index/postings.go +++ b/tsdb/index/postings.go @@ -293,10 +293,12 @@ func (p *MemPostings) EnsureOrder(numberOfConcurrentProcesses int) { func (p *MemPostings) Delete(deleted map[storage.SeriesRef]struct{}, affected map[labels.Label]struct{}) { p.mtx.Lock() defer p.mtx.Unlock() + if len(p.m) == 0 || len(deleted) == 0 { + return + } // Deleting label names mutates p.m map, so it should be done from a single goroutine after nobody else is reading it. - // Adding +1 to length to account for allPostingsKey processing when MemPostings is empty. 
- deleteLabelNames := make(chan string, len(p.m)+1) + deleteLabelNames := make(chan string, len(p.m)) process := func(l labels.Label) { orig := p.m[l.Name][l.Value] From be0f10054e6310a993e9e73dd97607071749ac76 Mon Sep 17 00:00:00 2001 From: Alex Johnson Date: Sat, 14 Sep 2024 13:04:33 -0500 Subject: [PATCH 052/137] Remove no-default-scrape-port featureFlag Signed-off-by: Alex Johnson --- cmd/prometheus/main.go | 5 +--- cmd/promtool/main.go | 11 +++---- cmd/promtool/sd.go | 8 +++--- cmd/promtool/sd_test.go | 2 +- docs/command-line/prometheus.md | 2 +- docs/command-line/promtool.md | 2 +- docs/feature_flags.md | 9 ------ scrape/manager.go | 3 +- scrape/manager_test.go | 30 +++++++++---------- scrape/scrape.go | 5 +--- scrape/target.go | 51 ++------------------------------- scrape/target_test.go | 4 +-- 12 files changed, 32 insertions(+), 100 deletions(-) diff --git a/cmd/prometheus/main.go b/cmd/prometheus/main.go index e7fd82e6f3..1be8b76949 100644 --- a/cmd/prometheus/main.go +++ b/cmd/prometheus/main.go @@ -217,9 +217,6 @@ func (c *flagConfig) setFeatureListOptions(logger log.Logger) error { case "concurrent-rule-eval": c.enableConcurrentRuleEval = true level.Info(logger).Log("msg", "Experimental concurrent rule evaluation enabled.") - case "no-default-scrape-port": - c.scrape.NoDefaultPort = true - level.Info(logger).Log("msg", "No default port will be appended to scrape targets' addresses.") case "promql-experimental-functions": parser.EnableExperimentalFunctions = true level.Info(logger).Log("msg", "Experimental PromQL functions enabled.") @@ -474,7 +471,7 @@ func main() { a.Flag("scrape.discovery-reload-interval", "Interval used by scrape manager to throttle target groups updates."). Hidden().Default("5s").SetValue(&cfg.scrape.DiscoveryReloadInterval) - a.Flag("enable-feature", "Comma separated feature names to enable. 
Valid options: auto-gomemlimit, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, extra-scrape-metrics, auto-gomaxprocs, no-default-scrape-port, native-histograms, otlp-write-receiver, created-timestamp-zero-ingestion, concurrent-rule-eval, delayed-compaction, old-ui. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details."). + a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: auto-gomemlimit, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, extra-scrape-metrics, auto-gomaxprocs, native-histograms, otlp-write-receiver, created-timestamp-zero-ingestion, concurrent-rule-eval, delayed-compaction, old-ui. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details."). Default("").StringsVar(&cfg.featureList) a.Flag("agent", "Run Prometheus in 'Agent mode'.").BoolVar(&agentMode) diff --git a/cmd/promtool/main.go b/cmd/promtool/main.go index 4d4cf6c5db..48f9be9309 100644 --- a/cmd/promtool/main.go +++ b/cmd/promtool/main.go @@ -291,7 +291,7 @@ func main() { promQLLabelsDeleteQuery := promQLLabelsDeleteCmd.Arg("query", "PromQL query.").Required().String() promQLLabelsDeleteName := promQLLabelsDeleteCmd.Arg("name", "Name of the label to delete.").Required().String() - featureList := app.Flag("enable-feature", "Comma separated feature names to enable (only PromQL related and no-default-scrape-port). See https://prometheus.io/docs/prometheus/latest/feature_flags/ for the options and more details.").Default("").Strings() + featureList := app.Flag("enable-feature", "Comma separated feature names to enable. Currently unused.").Default("").Strings() documentationCmd := app.Command("write-documentation", "Generate command line documentation. 
Internal use.").Hidden() @@ -321,24 +321,21 @@ func main() { } } - var noDefaultScrapePort bool for _, f := range *featureList { opts := strings.Split(f, ",") for _, o := range opts { switch o { - case "no-default-scrape-port": - noDefaultScrapePort = true case "": continue default: - fmt.Printf(" WARNING: Unknown option for --enable-feature: %q\n", o) + fmt.Printf(" WARNING: --enable-feature is currently a no-op") } } } switch parsedCmd { case sdCheckCmd.FullCommand(): - os.Exit(CheckSD(*sdConfigFile, *sdJobName, *sdTimeout, noDefaultScrapePort, prometheus.DefaultRegisterer)) + os.Exit(CheckSD(*sdConfigFile, *sdJobName, *sdTimeout, prometheus.DefaultRegisterer)) case checkConfigCmd.FullCommand(): os.Exit(CheckConfig(*agentMode, *checkConfigSyntaxOnly, newLintConfig(*checkConfigLint, *checkConfigLintFatal), *configFiles...)) @@ -1219,7 +1216,7 @@ func checkTargetGroupsForScrapeConfig(targetGroups []*targetgroup.Group, scfg *c lb := labels.NewBuilder(labels.EmptyLabels()) for _, tg := range targetGroups { var failures []error - targets, failures = scrape.TargetsFromGroup(tg, scfg, false, targets, lb) + targets, failures = scrape.TargetsFromGroup(tg, scfg, targets, lb) if len(failures) > 0 { first := failures[0] return first diff --git a/cmd/promtool/sd.go b/cmd/promtool/sd.go index e65262d439..6c0e896ffe 100644 --- a/cmd/promtool/sd.go +++ b/cmd/promtool/sd.go @@ -38,7 +38,7 @@ type sdCheckResult struct { } // CheckSD performs service discovery for the given job name and reports the results. 
-func CheckSD(sdConfigFiles, sdJobName string, sdTimeout time.Duration, noDefaultScrapePort bool, registerer prometheus.Registerer) int { +func CheckSD(sdConfigFiles, sdJobName string, sdTimeout time.Duration, registerer prometheus.Registerer) int { logger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr)) cfg, err := config.LoadFile(sdConfigFiles, false, false, logger) @@ -114,7 +114,7 @@ outerLoop: } results := []sdCheckResult{} for _, tgs := range sdCheckResults { - results = append(results, getSDCheckResult(tgs, scrapeConfig, noDefaultScrapePort)...) + results = append(results, getSDCheckResult(tgs, scrapeConfig)...) } res, err := json.MarshalIndent(results, "", " ") @@ -127,7 +127,7 @@ outerLoop: return successExitCode } -func getSDCheckResult(targetGroups []*targetgroup.Group, scrapeConfig *config.ScrapeConfig, noDefaultScrapePort bool) []sdCheckResult { +func getSDCheckResult(targetGroups []*targetgroup.Group, scrapeConfig *config.ScrapeConfig) []sdCheckResult { sdCheckResults := []sdCheckResult{} lb := labels.NewBuilder(labels.EmptyLabels()) for _, targetGroup := range targetGroups { @@ -144,7 +144,7 @@ func getSDCheckResult(targetGroups []*targetgroup.Group, scrapeConfig *config.Sc } } - res, orig, err := scrape.PopulateLabels(lb, scrapeConfig, noDefaultScrapePort) + res, orig, err := scrape.PopulateLabels(lb, scrapeConfig) result := sdCheckResult{ DiscoveredLabels: orig, Labels: res, diff --git a/cmd/promtool/sd_test.go b/cmd/promtool/sd_test.go index cb65ee72aa..44d8084651 100644 --- a/cmd/promtool/sd_test.go +++ b/cmd/promtool/sd_test.go @@ -70,5 +70,5 @@ func TestSDCheckResult(t *testing.T) { }, } - testutil.RequireEqual(t, expectedSDCheckResult, getSDCheckResult(targetGroups, scrapeConfig, true)) + testutil.RequireEqual(t, expectedSDCheckResult, getSDCheckResult(targetGroups, scrapeConfig)) } diff --git a/docs/command-line/prometheus.md b/docs/command-line/prometheus.md index 32b7039bd3..7737b50210 100644 --- a/docs/command-line/prometheus.md +++ 
b/docs/command-line/prometheus.md @@ -56,7 +56,7 @@ The Prometheus monitoring server | --query.timeout | Maximum time a query may take before being aborted. Use with server mode only. | `2m` | | --query.max-concurrency | Maximum number of queries executed concurrently. Use with server mode only. | `20` | | --query.max-samples | Maximum number of samples a single query can load into memory. Note that queries will fail if they try to load more samples than this into memory, so this also limits the number of samples a query can return. Use with server mode only. | `50000000` | -| --enable-feature ... | Comma separated feature names to enable. Valid options: auto-gomemlimit, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, extra-scrape-metrics, auto-gomaxprocs, no-default-scrape-port, native-histograms, otlp-write-receiver, created-timestamp-zero-ingestion, concurrent-rule-eval, delayed-compaction, old-ui. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details. | | +| --enable-feature ... | Comma separated feature names to enable. Valid options: auto-gomemlimit, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, extra-scrape-metrics, auto-gomaxprocs, native-histograms, otlp-write-receiver, created-timestamp-zero-ingestion, concurrent-rule-eval, delayed-compaction, old-ui. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details. | | | --agent | Run Prometheus in 'Agent mode'. | | | --log.level | Only log messages with the given severity or above. One of: [debug, info, warn, error] | `info` | | --log.format | Output format of log messages. 
One of: [logfmt, json] | `logfmt` | diff --git a/docs/command-line/promtool.md b/docs/command-line/promtool.md index e48cede79c..996a996555 100644 --- a/docs/command-line/promtool.md +++ b/docs/command-line/promtool.md @@ -15,7 +15,7 @@ Tooling for the Prometheus monitoring system. | -h, --help | Show context-sensitive help (also try --help-long and --help-man). | | --version | Show application version. | | --experimental | Enable experimental commands. | -| --enable-feature ... | Comma separated feature names to enable (only PromQL related and no-default-scrape-port). See https://prometheus.io/docs/prometheus/latest/feature_flags/ for the options and more details. | +| --enable-feature ... | Comma separated feature names to enable. Currently unused. | diff --git a/docs/feature_flags.md b/docs/feature_flags.md index 911dde20e4..1e9455a3fd 100644 --- a/docs/feature_flags.md +++ b/docs/feature_flags.md @@ -71,15 +71,6 @@ When enabled, the GOMEMLIMIT variable is automatically set to match the Linux co There is also an additional tuning flag, `--auto-gomemlimit.ratio`, which allows controlling how much of the memory is used for Prometheus. The remainder is reserved for memory outside the process. For example, kernel page cache. Page cache is important for Prometheus TSDB query performance. The default is `0.9`, which means 90% of the memory limit will be used for Prometheus. -## No default scrape port - -`--enable-feature=no-default-scrape-port` - -When enabled, the default ports for HTTP (`:80`) or HTTPS (`:443`) will _not_ be added to -the address used to scrape a target (the value of the `__address_` label), contrary to the default behavior. -In addition, if a default HTTP or HTTPS port has already been added either in a static configuration or -by a service discovery mechanism and the respective scheme is specified (`http` or `https`), that port will be removed. 
- ## Native Histograms `--enable-feature=native-histograms` diff --git a/scrape/manager.go b/scrape/manager.go index d7786a082b..cbb881028d 100644 --- a/scrape/manager.go +++ b/scrape/manager.go @@ -70,8 +70,7 @@ func NewManager(o *Options, logger log.Logger, newScrapeFailureLogger func(strin // Options are the configuration parameters to the scrape manager. type Options struct { - ExtraMetrics bool - NoDefaultPort bool + ExtraMetrics bool // Option used by downstream scraper users like OpenTelemetry Collector // to help lookup metric metadata. Should be false for Prometheus. PassMetadataInContext bool diff --git a/scrape/manager_test.go b/scrape/manager_test.go index cd712ca62b..81ce8bd84f 100644 --- a/scrape/manager_test.go +++ b/scrape/manager_test.go @@ -52,12 +52,11 @@ func init() { func TestPopulateLabels(t *testing.T) { cases := []struct { - in labels.Labels - cfg *config.ScrapeConfig - noDefaultPort bool - res labels.Labels - resOrig labels.Labels - err string + in labels.Labels + cfg *config.ScrapeConfig + res labels.Labels + resOrig labels.Labels + err string }{ // Regular population of scrape config options. 
{ @@ -111,8 +110,8 @@ func TestPopulateLabels(t *testing.T) { ScrapeTimeout: model.Duration(time.Second), }, res: labels.FromMap(map[string]string{ - model.AddressLabel: "1.2.3.4:80", - model.InstanceLabel: "1.2.3.4:80", + model.AddressLabel: "1.2.3.4", + model.InstanceLabel: "1.2.3.4", model.SchemeLabel: "http", model.MetricsPathLabel: "/custom", model.JobLabel: "custom-job", @@ -142,7 +141,7 @@ func TestPopulateLabels(t *testing.T) { ScrapeTimeout: model.Duration(time.Second), }, res: labels.FromMap(map[string]string{ - model.AddressLabel: "[::1]:443", + model.AddressLabel: "[::1]", model.InstanceLabel: "custom-instance", model.SchemeLabel: "https", model.MetricsPathLabel: "/metrics", @@ -365,7 +364,6 @@ func TestPopulateLabels(t *testing.T) { ScrapeInterval: model.Duration(time.Second), ScrapeTimeout: model.Duration(time.Second), }, - noDefaultPort: true, res: labels.FromMap(map[string]string{ model.AddressLabel: "1.2.3.4", model.InstanceLabel: "1.2.3.4", @@ -384,7 +382,7 @@ func TestPopulateLabels(t *testing.T) { model.ScrapeTimeoutLabel: "1s", }), }, - // Remove default port (http). + // verify that the default port is not removed (http). { in: labels.FromMap(map[string]string{ model.AddressLabel: "1.2.3.4:80", @@ -396,9 +394,8 @@ func TestPopulateLabels(t *testing.T) { ScrapeInterval: model.Duration(time.Second), ScrapeTimeout: model.Duration(time.Second), }, - noDefaultPort: true, res: labels.FromMap(map[string]string{ - model.AddressLabel: "1.2.3.4", + model.AddressLabel: "1.2.3.4:80", model.InstanceLabel: "1.2.3.4:80", model.SchemeLabel: "http", model.MetricsPathLabel: "/metrics", @@ -415,7 +412,7 @@ func TestPopulateLabels(t *testing.T) { model.ScrapeTimeoutLabel: "1s", }), }, - // Remove default port (https). + // verify that the default port is not removed (https). 
{ in: labels.FromMap(map[string]string{ model.AddressLabel: "1.2.3.4:443", @@ -427,9 +424,8 @@ func TestPopulateLabels(t *testing.T) { ScrapeInterval: model.Duration(time.Second), ScrapeTimeout: model.Duration(time.Second), }, - noDefaultPort: true, res: labels.FromMap(map[string]string{ - model.AddressLabel: "1.2.3.4", + model.AddressLabel: "1.2.3.4:443", model.InstanceLabel: "1.2.3.4:443", model.SchemeLabel: "https", model.MetricsPathLabel: "/metrics", @@ -450,7 +446,7 @@ func TestPopulateLabels(t *testing.T) { for _, c := range cases { in := c.in.Copy() - res, orig, err := PopulateLabels(labels.NewBuilder(c.in), c.cfg, c.noDefaultPort) + res, orig, err := PopulateLabels(labels.NewBuilder(c.in), c.cfg) if c.err != "" { require.EqualError(t, err, c.err) } else { diff --git a/scrape/scrape.go b/scrape/scrape.go index dca4682b11..e88eb15a9e 100644 --- a/scrape/scrape.go +++ b/scrape/scrape.go @@ -87,8 +87,6 @@ type scrapePool struct { // Constructor for new scrape loops. This is settable for testing convenience. newLoop func(scrapeLoopOptions) loop - noDefaultPort bool - metrics *scrapeMetrics scrapeFailureLogger log.Logger @@ -149,7 +147,6 @@ func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, offsetSeed logger: logger, metrics: metrics, httpOpts: options.HTTPClientOptions, - noDefaultPort: options.NoDefaultPort, } sp.newLoop = func(opts scrapeLoopOptions) loop { // Update the targets retrieval function for metadata to a new scrape cache. 
@@ -429,7 +426,7 @@ func (sp *scrapePool) Sync(tgs []*targetgroup.Group) { sp.droppedTargets = []*Target{} sp.droppedTargetsCount = 0 for _, tg := range tgs { - targets, failures := TargetsFromGroup(tg, sp.config, sp.noDefaultPort, targets, lb) + targets, failures := TargetsFromGroup(tg, sp.config, targets, lb) for _, err := range failures { level.Error(sp.logger).Log("msg", "Creating target failed", "err", err) } diff --git a/scrape/target.go b/scrape/target.go index 3754398338..06d4737ff9 100644 --- a/scrape/target.go +++ b/scrape/target.go @@ -17,7 +17,6 @@ import ( "errors" "fmt" "hash/fnv" - "net" "net/url" "strings" "sync" @@ -424,7 +423,7 @@ func (app *maxSchemaAppender) AppendHistogram(ref storage.SeriesRef, lset labels // PopulateLabels builds a label set from the given label set and scrape configuration. // It returns a label set before relabeling was applied as the second return value. // Returns the original discovered label set found before relabelling was applied if the target is dropped during relabeling. -func PopulateLabels(lb *labels.Builder, cfg *config.ScrapeConfig, noDefaultPort bool) (res, orig labels.Labels, err error) { +func PopulateLabels(lb *labels.Builder, cfg *config.ScrapeConfig) (res, orig labels.Labels, err error) { // Copy labels into the labelset for the target if they are not set already. scrapeLabels := []labels.Label{ {Name: model.JobLabel, Value: cfg.JobName}, @@ -457,51 +456,7 @@ func PopulateLabels(lb *labels.Builder, cfg *config.ScrapeConfig, noDefaultPort return labels.EmptyLabels(), labels.EmptyLabels(), errors.New("no address") } - // addPort checks whether we should add a default port to the address. - // If the address is not valid, we don't append a port either. - addPort := func(s string) (string, string, bool) { - // If we can split, a port exists and we don't have to add one. 
- if host, port, err := net.SplitHostPort(s); err == nil { - return host, port, false - } - // If adding a port makes it valid, the previous error - // was not due to an invalid address and we can append a port. - _, _, err := net.SplitHostPort(s + ":1234") - return "", "", err == nil - } - addr := lb.Get(model.AddressLabel) - scheme := lb.Get(model.SchemeLabel) - host, port, add := addPort(addr) - // If it's an address with no trailing port, infer it based on the used scheme - // unless the no-default-scrape-port feature flag is present. - if !noDefaultPort && add { - // Addresses reaching this point are already wrapped in [] if necessary. - switch scheme { - case "http", "": - addr += ":80" - case "https": - addr += ":443" - default: - return labels.EmptyLabels(), labels.EmptyLabels(), fmt.Errorf("invalid scheme: %q", cfg.Scheme) - } - lb.Set(model.AddressLabel, addr) - } - - if noDefaultPort { - // If it's an address with a trailing default port and the - // no-default-scrape-port flag is present, remove the port. - switch port { - case "80": - if scheme == "http" { - lb.Set(model.AddressLabel, host) - } - case "443": - if scheme == "https" { - lb.Set(model.AddressLabel, host) - } - } - } if err := config.CheckTargetAddress(model.LabelValue(addr)); err != nil { return labels.EmptyLabels(), labels.EmptyLabels(), err @@ -557,7 +512,7 @@ func PopulateLabels(lb *labels.Builder, cfg *config.ScrapeConfig, noDefaultPort } // TargetsFromGroup builds targets based on the given TargetGroup and config. 
-func TargetsFromGroup(tg *targetgroup.Group, cfg *config.ScrapeConfig, noDefaultPort bool, targets []*Target, lb *labels.Builder) ([]*Target, []error) { +func TargetsFromGroup(tg *targetgroup.Group, cfg *config.ScrapeConfig, targets []*Target, lb *labels.Builder) ([]*Target, []error) { targets = targets[:0] failures := []error{} @@ -573,7 +528,7 @@ func TargetsFromGroup(tg *targetgroup.Group, cfg *config.ScrapeConfig, noDefault } } - lset, origLabels, err := PopulateLabels(lb, cfg, noDefaultPort) + lset, origLabels, err := PopulateLabels(lb, cfg) if err != nil { failures = append(failures, fmt.Errorf("instance %d in group %s: %w", i, tg, err)) } diff --git a/scrape/target_test.go b/scrape/target_test.go index 84fe078b2b..bd27952874 100644 --- a/scrape/target_test.go +++ b/scrape/target_test.go @@ -348,7 +348,7 @@ func TestTargetsFromGroup(t *testing.T) { ScrapeInterval: model.Duration(1 * time.Minute), } lb := labels.NewBuilder(labels.EmptyLabels()) - targets, failures := TargetsFromGroup(&targetgroup.Group{Targets: []model.LabelSet{{}, {model.AddressLabel: "localhost:9090"}}}, &cfg, false, nil, lb) + targets, failures := TargetsFromGroup(&targetgroup.Group{Targets: []model.LabelSet{{}, {model.AddressLabel: "localhost:9090"}}}, &cfg, nil, lb) require.Len(t, targets, 1) require.Len(t, failures, 1) require.EqualError(t, failures[0], expectedError) @@ -435,7 +435,7 @@ scrape_configs: lb := labels.NewBuilder(labels.EmptyLabels()) group := &targetgroup.Group{Targets: targets} for i := 0; i < b.N; i++ { - tgets, _ = TargetsFromGroup(group, config.ScrapeConfigs[0], false, tgets, lb) + tgets, _ = TargetsFromGroup(group, config.ScrapeConfigs[0], tgets, lb) if len(targets) != nTargets { b.Fatalf("Expected %d targets, got %d", nTargets, len(targets)) } From fcbd18dabb39f4f8a05e4d486ea1c7c6538397c4 Mon Sep 17 00:00:00 2001 From: Julius Volz Date: Wed, 25 Sep 2024 18:27:27 +0200 Subject: [PATCH 053/137] Remove Query page alert close buttons that don't do anything 
Signed-off-by: Julius Volz --- web/ui/mantine-ui/src/pages/query/QueryPage.tsx | 2 -- 1 file changed, 2 deletions(-) diff --git a/web/ui/mantine-ui/src/pages/query/QueryPage.tsx b/web/ui/mantine-ui/src/pages/query/QueryPage.tsx index 3baa77dfd0..71c969daf8 100644 --- a/web/ui/mantine-ui/src/pages/query/QueryPage.tsx +++ b/web/ui/mantine-ui/src/pages/query/QueryPage.tsx @@ -84,7 +84,6 @@ export default function QueryPage() { icon={} color="red" title="Error fetching metrics list" - withCloseButton > Unable to fetch list of metric names: {metricNamesError.message} @@ -95,7 +94,6 @@ export default function QueryPage() { icon={} color="red" title="Error fetching server time" - withCloseButton > {timeError.message} From 15b68e989cd347c03c36b8aeabae99f550a55361 Mon Sep 17 00:00:00 2001 From: Craig Ringer Date: Wed, 18 Oct 2023 14:11:51 +1300 Subject: [PATCH 054/137] Refer to staleness in instant vector documentation The instant vector documentation does not explain which metric samples are selected - in particular, it makes no reference to staleness. It's confusing when reading the docs to understand how exactly Prometheus selects the metrics to report: the most recent sample older than the search timestamp specified in the API request, so long as that metric is not "stale". Signed-off-by: Craig Ringer --- docs/querying/basics.md | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/docs/querying/basics.md b/docs/querying/basics.md index 4ea186beeb..66d7b8018d 100644 --- a/docs/querying/basics.md +++ b/docs/querying/basics.md @@ -109,8 +109,16 @@ single sample value for each at a given timestamp (point in time). In the simpl form, only a metric name is specified, which results in an instant vector containing elements for all time series that have this metric name. 
+The value returned will be that of the most recent sample at or before the +query's evaluation timestamp (in the case of an +[instant query](api.md#instant-queries)) +or the current step within the query (in the case of a +[range query](api.md#range-queries)). +The [`@` modifier](#modifier) allows overriding the timestamp relative to which +the selection takes place. Time series are only returned if their most recent sample is less than the [lookback period](#staleness) ago. + This example selects all time series that have the `http_requests_total` metric -name: +name, returning the most recent sample for each: http_requests_total @@ -359,7 +367,8 @@ cases like aggregation (`sum`, `avg`, and so on), where multiple aggregated time series do not precisely align in time. Because of their independence, Prometheus needs to assign a value at those timestamps for each relevant time series. It does so by taking the newest sample that is less than the lookback period ago. -The lookback period is 5 minutes by default. +The lookback period is 5 minutes by default, but can be +[set with the `--query.lookback-delta` flag](../command-line/prometheus.md). If a target scrape or rule evaluation no longer returns a sample for a time series that was previously present, this time series will be marked as stale. 
From 4fd2556baa8bc11d49529abb92163feca33d1a58 Mon Sep 17 00:00:00 2001 From: Oleg Zaytsev Date: Thu, 26 Sep 2024 15:43:19 +0200 Subject: [PATCH 055/137] Extract processWithBoundedParallelismAndConsistentWorkers Signed-off-by: Oleg Zaytsev --- tsdb/index/postings.go | 93 +++++++++++++++++++++++------------------- 1 file changed, 52 insertions(+), 41 deletions(-) diff --git a/tsdb/index/postings.go b/tsdb/index/postings.go index e6a6c708ff..f8415407ef 100644 --- a/tsdb/index/postings.go +++ b/tsdb/index/postings.go @@ -300,52 +300,34 @@ func (p *MemPostings) Delete(deleted map[storage.SeriesRef]struct{}, affected ma // Deleting label names mutates p.m map, so it should be done from a single goroutine after nobody else is reading it. deleteLabelNames := make(chan string, len(p.m)) - process := func(l labels.Label) { - orig := p.m[l.Name][l.Value] - repl := make([]storage.SeriesRef, 0, len(orig)) - for _, id := range orig { - if _, ok := deleted[id]; !ok { - repl = append(repl, id) + process, wait := processWithBoundedParallelismAndConsistentWorkers( + runtime.GOMAXPROCS(0), + func(l labels.Label) uint64 { return xxhash.Sum64String(l.Name) }, + func(l labels.Label) { + orig := p.m[l.Name][l.Value] + repl := make([]storage.SeriesRef, 0, len(orig)) + for _, id := range orig { + if _, ok := deleted[id]; !ok { + repl = append(repl, id) + } } - } - if len(repl) > 0 { - p.m[l.Name][l.Value] = repl - } else { - delete(p.m[l.Name], l.Value) - if len(p.m[l.Name]) == 0 { - // Delete the key if we removed all values. - deleteLabelNames <- l.Name + if len(repl) > 0 { + p.m[l.Name][l.Value] = repl + } else { + delete(p.m[l.Name], l.Value) + if len(p.m[l.Name]) == 0 { + // Delete the key if we removed all values. + deleteLabelNames <- l.Name + } } - } - } + }, + ) - // Create GOMAXPROCS workers. 
- wg := sync.WaitGroup{} - jobs := make([]chan labels.Label, runtime.GOMAXPROCS(0)) - for i := range jobs { - jobs[i] = make(chan labels.Label, 128) - wg.Add(1) - go func(jobs chan labels.Label) { - defer wg.Done() - for l := range jobs { - process(l) - } - }(jobs[i]) - } - - // Process all affected labels and the allPostingsKey. for l := range affected { - j := int(xxhash.Sum64String(l.Name) % uint64(len(jobs))) - jobs[j] <- l + process(l) } - j := int(xxhash.Sum64String(allPostingsKey.Name) % uint64(len(jobs))) - jobs[j] <- allPostingsKey - - // Close jobs channels and wait all workers to finish. - for i := range jobs { - close(jobs[i]) - } - wg.Wait() + process(allPostingsKey) + wait() // Close deleteLabelNames channel and delete the label names requested. close(deleteLabelNames) @@ -354,6 +336,35 @@ func (p *MemPostings) Delete(deleted map[storage.SeriesRef]struct{}, affected ma } } +// processWithBoundedParallelismAndConsistentWorkers will call f() with bounded parallelism, +// making sure that elements with same hash(T) will always be processed by the same worker. +// Call process() to add more jobs to process, and once finished adding, call wait() to ensure that all jobs are processed. +func processWithBoundedParallelismAndConsistentWorkers[T any](workers int, hash func(T) uint64, f func(T)) (process func(T), wait func()) { + wg := &sync.WaitGroup{} + jobs := make([]chan T, workers) + for i := 0; i < workers; i++ { + wg.Add(1) + jobs[i] = make(chan T, 128) + go func(jobs <-chan T) { + defer wg.Done() + for l := range jobs { + f(l) + } + }(jobs[i]) + } + + process = func(job T) { + jobs[hash(job)%uint64(workers)] <- job + } + wait = func() { + for i := range jobs { + close(jobs[i]) + } + wg.Wait() + } + return process, wait +} + // Iter calls f for each postings list. It aborts if f returns an error and returns it. 
func (p *MemPostings) Iter(f func(labels.Label, Postings) error) error { p.mtx.RLock() From 6bd9b1a7cc0369952da4467a4f38c4c8c6dd4629 Mon Sep 17 00:00:00 2001 From: Arthur Silva Sens Date: Fri, 19 Jul 2024 11:28:00 -0300 Subject: [PATCH 056/137] Histogram CT Zero ingestion Signed-off-by: Arthur Silva Sens --- cmd/prometheus/main.go | 4 + scrape/helpers_test.go | 20 ++- scrape/manager_test.go | 174 +++++++++++++++++++++++++++ scrape/scrape.go | 10 +- scrape/scrape_test.go | 6 +- storage/fanout.go | 14 +++ storage/interface.go | 17 ++- storage/remote/write.go | 5 + storage/remote/write_handler_test.go | 7 ++ tsdb/agent/db.go | 5 + tsdb/head_append.go | 113 ++++++++++++++++- tsdb/head_test.go | 160 ++++++++++++++++++++++++ 12 files changed, 526 insertions(+), 9 deletions(-) diff --git a/cmd/prometheus/main.go b/cmd/prometheus/main.go index e7fd82e6f3..7d9106b335 100644 --- a/cmd/prometheus/main.go +++ b/cmd/prometheus/main.go @@ -1597,6 +1597,10 @@ func (n notReadyAppender) AppendHistogram(ref storage.SeriesRef, l labels.Labels return 0, tsdb.ErrNotReady } +func (n notReadyAppender) AppendHistogramCTZeroSample(ref storage.SeriesRef, l labels.Labels, t, ct int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) { + return 0, tsdb.ErrNotReady +} + func (n notReadyAppender) UpdateMetadata(ref storage.SeriesRef, l labels.Labels, m metadata.Metadata) (storage.SeriesRef, error) { return 0, tsdb.ErrNotReady } diff --git a/scrape/helpers_test.go b/scrape/helpers_test.go index 116fa5c94b..4f7918f79e 100644 --- a/scrape/helpers_test.go +++ b/scrape/helpers_test.go @@ -55,6 +55,10 @@ func (a nopAppender) AppendHistogram(storage.SeriesRef, labels.Labels, int64, *h return 0, nil } +func (a nopAppender) AppendHistogramCTZeroSample(ref storage.SeriesRef, l labels.Labels, t, ct int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) { + return 0, nil +} + func (a nopAppender) UpdateMetadata(storage.SeriesRef, 
labels.Labels, metadata.Metadata) (storage.SeriesRef, error) { return 0, nil } @@ -78,9 +82,10 @@ func equalFloatSamples(a, b floatSample) bool { } type histogramSample struct { - t int64 - h *histogram.Histogram - fh *histogram.FloatHistogram + metric labels.Labels + t int64 + h *histogram.Histogram + fh *histogram.FloatHistogram } type collectResultAppendable struct { @@ -146,7 +151,7 @@ func (a *collectResultAppender) AppendExemplar(ref storage.SeriesRef, l labels.L func (a *collectResultAppender) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) { a.mtx.Lock() defer a.mtx.Unlock() - a.pendingHistograms = append(a.pendingHistograms, histogramSample{h: h, fh: fh, t: t}) + a.pendingHistograms = append(a.pendingHistograms, histogramSample{h: h, fh: fh, t: t, metric: l}) if a.next == nil { return 0, nil } @@ -154,6 +159,13 @@ func (a *collectResultAppender) AppendHistogram(ref storage.SeriesRef, l labels. 
return a.next.AppendHistogram(ref, l, t, h, fh) } +func (a *collectResultAppender) AppendHistogramCTZeroSample(ref storage.SeriesRef, l labels.Labels, t, ct int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) { + if h != nil { + return a.AppendHistogram(ref, l, ct, &histogram.Histogram{}, nil) + } + return a.AppendHistogram(ref, l, ct, nil, &histogram.FloatHistogram{}) +} + func (a *collectResultAppender) UpdateMetadata(ref storage.SeriesRef, l labels.Labels, m metadata.Metadata) (storage.SeriesRef, error) { a.mtx.Lock() defer a.mtx.Unlock() diff --git a/scrape/manager_test.go b/scrape/manager_test.go index cd712ca62b..13a3698127 100644 --- a/scrape/manager_test.go +++ b/scrape/manager_test.go @@ -39,8 +39,10 @@ import ( "github.com/prometheus/prometheus/discovery" _ "github.com/prometheus/prometheus/discovery/file" "github.com/prometheus/prometheus/discovery/targetgroup" + "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/relabel" + "github.com/prometheus/prometheus/tsdb/tsdbutil" "github.com/prometheus/prometheus/util/runutil" "github.com/prometheus/prometheus/util/testutil" ) @@ -858,6 +860,178 @@ func TestManagerCTZeroIngestion(t *testing.T) { } } +// generateTestHistogram generates the same thing as tsdbutil.GenerateTestHistogram, +// but in the form of dto.Histogram. 
+func generateTestHistogram(i int) *dto.Histogram { + helper := tsdbutil.GenerateTestHistogram(i) + h := &dto.Histogram{} + h.SampleCount = proto.Uint64(helper.Count) + h.SampleSum = proto.Float64(helper.Sum) + h.Schema = proto.Int32(helper.Schema) + h.ZeroThreshold = proto.Float64(helper.ZeroThreshold) + h.ZeroCount = proto.Uint64(helper.ZeroCount) + h.PositiveSpan = make([]*dto.BucketSpan, len(helper.PositiveSpans)) + for i, span := range helper.PositiveSpans { + h.PositiveSpan[i] = &dto.BucketSpan{ + Offset: proto.Int32(span.Offset), + Length: proto.Uint32(span.Length), + } + } + h.PositiveDelta = helper.PositiveBuckets + h.NegativeSpan = make([]*dto.BucketSpan, len(helper.NegativeSpans)) + for i, span := range helper.NegativeSpans { + h.NegativeSpan[i] = &dto.BucketSpan{ + Offset: proto.Int32(span.Offset), + Length: proto.Uint32(span.Length), + } + } + h.NegativeDelta = helper.NegativeBuckets + return h +} + +func TestManagerCTZeroIngestionHistogram(t *testing.T) { + const mName = "expected_histogram" + + for _, tc := range []struct { + name string + inputHistSample *dto.Histogram + enableCTZeroIngestion bool + }{ + { + name: "disabled with CT on histogram", + inputHistSample: func() *dto.Histogram { + h := generateTestHistogram(0) + h.CreatedTimestamp = timestamppb.Now() + return h + }(), + enableCTZeroIngestion: false, + }, + { + name: "enabled with CT on histogram", + inputHistSample: func() *dto.Histogram { + h := generateTestHistogram(0) + h.CreatedTimestamp = timestamppb.Now() + return h + }(), + enableCTZeroIngestion: true, + }, + { + name: "enabled without CT on histogram", + inputHistSample: func() *dto.Histogram { + h := generateTestHistogram(0) + return h + }(), + enableCTZeroIngestion: true, + }, + } { + t.Run(tc.name, func(t *testing.T) { + app := &collectResultAppender{} + scrapeManager, err := NewManager( + &Options{ + EnableCreatedTimestampZeroIngestion: tc.enableCTZeroIngestion, + EnableNativeHistogramsIngestion: true, + skipOffsetting: true, + 
}, + log.NewLogfmtLogger(os.Stderr), + nil, + &collectResultAppendable{app}, + prometheus.NewRegistry(), + ) + require.NoError(t, err) + + require.NoError(t, scrapeManager.ApplyConfig(&config.Config{ + GlobalConfig: config.GlobalConfig{ + // Disable regular scrapes. + ScrapeInterval: model.Duration(9999 * time.Minute), + ScrapeTimeout: model.Duration(5 * time.Second), + // Ensure the proto is chosen. We need proto as it's the only protocol + // with the CT parsing support. + ScrapeProtocols: []config.ScrapeProtocol{config.PrometheusProto}, + }, + ScrapeConfigs: []*config.ScrapeConfig{{JobName: "test"}}, + })) + + once := sync.Once{} + // Start fake HTTP target to that allow one scrape only. + server := httptest.NewServer( + http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fail := true // TODO(bwplotka): Kill or use? + once.Do(func() { + fail = false + w.Header().Set("Content-Type", `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited`) + + ctrType := dto.MetricType_HISTOGRAM + w.Write(protoMarshalDelimited(t, &dto.MetricFamily{ + Name: proto.String(mName), + Type: &ctrType, + Metric: []*dto.Metric{{Histogram: tc.inputHistSample}}, + })) + }) + + if fail { + w.WriteHeader(http.StatusInternalServerError) + } + }), + ) + defer server.Close() + + serverURL, err := url.Parse(server.URL) + require.NoError(t, err) + + // Add fake target directly into tsets + reload. Normally users would use + // Manager.Run and wait for minimum 5s refresh interval. + scrapeManager.updateTsets(map[string][]*targetgroup.Group{ + "test": {{ + Targets: []model.LabelSet{{ + model.SchemeLabel: model.LabelValue(serverURL.Scheme), + model.AddressLabel: model.LabelValue(serverURL.Host), + }}, + }}, + }) + scrapeManager.reload() + + var got []histogramSample + + // Wait for one scrape. 
+ ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute) + defer cancel() + require.NoError(t, runutil.Retry(100*time.Millisecond, ctx.Done(), func() error { + app.mtx.Lock() + defer app.mtx.Unlock() + + // Check if scrape happened and grab the relevant histograms, they have to be there - or it's a bug + // and it's not worth waiting. + for _, h := range app.resultHistograms { + if h.metric.Get(model.MetricNameLabel) == mName { + got = append(got, h) + } + } + if len(app.resultHistograms) > 0 { + return nil + } + return fmt.Errorf("expected some histogram samples, got none") + }), "after 1 minute") + scrapeManager.Stop() + + // Check for zero samples, assuming we only injected always one histogram sample. + // Did it contain CT to inject? If yes, was CT zero enabled? + if tc.inputHistSample.CreatedTimestamp.IsValid() && tc.enableCTZeroIngestion { + require.Len(t, got, 2) + // Zero sample. + require.Equal(t, histogram.Histogram{}, *got[0].h) + // Quick soft check to make sure it's the same sample or at least not zero. + require.Equal(t, tc.inputHistSample.GetSampleSum(), got[1].h.Sum) + return + } + + // Expect only one, valid sample. + require.Len(t, got, 1) + // Quick soft check to make sure it's the same sample or at least not zero. + require.Equal(t, tc.inputHistSample.GetSampleSum(), got[0].h.Sum) + }) + } +} + func TestUnregisterMetrics(t *testing.T) { reg := prometheus.NewRegistry() // Check that all metrics can be unregistered, allowing a second manager to be created. 
diff --git a/scrape/scrape.go b/scrape/scrape.go index dca4682b11..f29beeb03f 100644 --- a/scrape/scrape.go +++ b/scrape/scrape.go @@ -1701,7 +1701,15 @@ loop: } else { if sl.enableCTZeroIngestion { if ctMs := p.CreatedTimestamp(); ctMs != nil { - ref, err = app.AppendCTZeroSample(ref, lset, t, *ctMs) + if isHistogram && sl.enableNativeHistogramIngestion { + if h != nil { + ref, err = app.AppendHistogramCTZeroSample(ref, lset, t, *ctMs, h, nil) + } else { + ref, err = app.AppendHistogramCTZeroSample(ref, lset, t, *ctMs, nil, fh) + } + } else { + ref, err = app.AppendCTZeroSample(ref, lset, t, *ctMs) + } if err != nil && !errors.Is(err, storage.ErrOutOfOrderCT) { // OOO is a common case, ignoring completely for now. // CT is an experimental feature. For now, we don't need to fail the // scrape on errors updating the created timestamp, log debug. diff --git a/scrape/scrape_test.go b/scrape/scrape_test.go index 9887924c33..9e49fe8efa 100644 --- a/scrape/scrape_test.go +++ b/scrape/scrape_test.go @@ -1999,7 +1999,8 @@ metric: < `, contentType: "application/vnd.google.protobuf", histograms: []histogramSample{{ - t: 1234568, + t: 1234568, + metric: labels.FromStrings("__name__", "test_histogram"), h: &histogram.Histogram{ Count: 175, ZeroCount: 2, @@ -2125,7 +2126,8 @@ metric: < {metric: labels.FromStrings("__name__", "test_histogram_bucket", "le", "+Inf"), t: 1234568, f: 175}, }, histograms: []histogramSample{{ - t: 1234568, + t: 1234568, + metric: labels.FromStrings("__name__", "test_histogram"), h: &histogram.Histogram{ Count: 175, ZeroCount: 2, diff --git a/storage/fanout.go b/storage/fanout.go index e52342bc7e..80022b2566 100644 --- a/storage/fanout.go +++ b/storage/fanout.go @@ -190,6 +190,20 @@ func (f *fanoutAppender) AppendHistogram(ref SeriesRef, l labels.Labels, t int64 return ref, nil } +func (f *fanoutAppender) AppendHistogramCTZeroSample(ref SeriesRef, l labels.Labels, t, ct int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (SeriesRef, error) { + 
ref, err := f.primary.AppendHistogramCTZeroSample(ref, l, t, ct, h, fh) + if err != nil { + return ref, err + } + + for _, appender := range f.secondaries { + if _, err := appender.AppendHistogramCTZeroSample(ref, l, t, ct, h, fh); err != nil { + return 0, err + } + } + return ref, nil +} + func (f *fanoutAppender) UpdateMetadata(ref SeriesRef, l labels.Labels, m metadata.Metadata) (SeriesRef, error) { ref, err := f.primary.UpdateMetadata(ref, l, m) if err != nil { diff --git a/storage/interface.go b/storage/interface.go index 9654c88331..7ac93129e8 100644 --- a/storage/interface.go +++ b/storage/interface.go @@ -50,7 +50,8 @@ var ( // NOTE(bwplotka): This can be both an instrumentation failure or commonly expected // behaviour, and we currently don't have a way to determine this. As a result // it's recommended to ignore this error for now. - ErrOutOfOrderCT = fmt.Errorf("created timestamp out of order, ignoring") + ErrOutOfOrderCT = fmt.Errorf("created timestamp out of order, ignoring") + ErrCTNewerThanSample = fmt.Errorf("CT is newer or the same as sample's timestamp, ignoring") ) // SeriesRef is a generic series reference. In prometheus it is either a @@ -313,6 +314,20 @@ type HistogramAppender interface { // pointer. AppendHistogram won't mutate the histogram, but in turn // depends on the caller to not mutate it either. AppendHistogram(ref SeriesRef, l labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (SeriesRef, error) + // AppendHistogramCTZeroSample adds synthetic zero sample for the given ct timestamp, + // which will be associated with given series, labels and the incoming + // sample's t (timestamp). AppendHistogramCTZeroSample returns error if zero sample can't be + // appended, for example when ct is too old, or when it would collide with + // incoming sample (sample has priority). + // + // AppendHistogramCTZeroSample has to be called before the corresponding histogram AppendHistogram. 
+ // A series reference number is returned which can be used to modify the + // CT for the given series in the same or later transactions. + // Returned reference numbers are ephemeral and may be rejected in calls + // to AppendHistogramCTZeroSample() at any point. + // + // If the reference is 0 it must not be used for caching. + AppendHistogramCTZeroSample(ref SeriesRef, l labels.Labels, t, ct int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (SeriesRef, error) } // MetadataUpdater provides an interface for associating metadata to stored series. diff --git a/storage/remote/write.go b/storage/remote/write.go index eba4290840..624732c4fe 100644 --- a/storage/remote/write.go +++ b/storage/remote/write.go @@ -306,6 +306,11 @@ func (t *timestampTracker) AppendHistogram(_ storage.SeriesRef, _ labels.Labels, return 0, nil } +func (t *timestampTracker) AppendHistogramCTZeroSample(_ storage.SeriesRef, _ labels.Labels, _, _ int64, _ *histogram.Histogram, _ *histogram.FloatHistogram) (storage.SeriesRef, error) { + // TODO: Implement + return 0, nil +} + func (t *timestampTracker) UpdateMetadata(_ storage.SeriesRef, _ labels.Labels, _ metadata.Metadata) (storage.SeriesRef, error) { // TODO: Add and increment a `metadata` field when we get around to wiring metadata in remote_write. // UpdateMetadata is no-op for remote write (where timestampTracker is being used) for now. diff --git a/storage/remote/write_handler_test.go b/storage/remote/write_handler_test.go index 5c89a1ab95..8e628f40de 100644 --- a/storage/remote/write_handler_test.go +++ b/storage/remote/write_handler_test.go @@ -915,6 +915,13 @@ func (m *mockAppendable) AppendHistogram(_ storage.SeriesRef, l labels.Labels, t return 0, nil } +func (m *mockAppendable) AppendHistogramCTZeroSample(ref storage.SeriesRef, l labels.Labels, t, ct int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) { + // AppendCTZeroSample is no-op for remote-write for now. 
+ // TODO(bwplotka/arthursens): Add support for PRW 2.0 for CT zero feature (but also we might + // replace this with in-metadata CT storage, see https://github.com/prometheus/prometheus/issues/14218). + return 0, nil +} + func (m *mockAppendable) UpdateMetadata(_ storage.SeriesRef, l labels.Labels, mp metadata.Metadata) (storage.SeriesRef, error) { if m.updateMetadataErr != nil { return 0, m.updateMetadataErr diff --git a/tsdb/agent/db.go b/tsdb/agent/db.go index 596d5c8a31..5e33fce808 100644 --- a/tsdb/agent/db.go +++ b/tsdb/agent/db.go @@ -972,6 +972,11 @@ func (a *appender) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t int return storage.SeriesRef(series.ref), nil } +func (a *appender) AppendHistogramCTZeroSample(ref storage.SeriesRef, l labels.Labels, t, ct int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) { + // TODO(bwplotka/arthursens): Wire metadata in the Agent's appender. + return 0, nil +} + func (a *appender) UpdateMetadata(storage.SeriesRef, labels.Labels, metadata.Metadata) (storage.SeriesRef, error) { // TODO: Wire metadata in the Agent's appender. 
return 0, nil diff --git a/tsdb/head_append.go b/tsdb/head_append.go index 3dd9a367b0..10fb17809b 100644 --- a/tsdb/head_append.go +++ b/tsdb/head_append.go @@ -79,6 +79,16 @@ func (a *initAppender) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t return a.app.AppendHistogram(ref, l, t, h, fh) } +func (a *initAppender) AppendHistogramCTZeroSample(ref storage.SeriesRef, l labels.Labels, t, ct int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) { + if a.app != nil { + return a.app.AppendHistogramCTZeroSample(ref, l, t, ct, h, fh) + } + a.head.initTime(t) + a.app = a.head.appender() + + return a.app.AppendHistogramCTZeroSample(ref, l, t, ct, h, fh) +} + func (a *initAppender) UpdateMetadata(ref storage.SeriesRef, l labels.Labels, m metadata.Metadata) (storage.SeriesRef, error) { if a.app != nil { return a.app.UpdateMetadata(ref, l, m) @@ -388,7 +398,7 @@ func (a *headAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64 // storage.CreatedTimestampAppender.AppendCTZeroSample for further documentation. 
func (a *headAppender) AppendCTZeroSample(ref storage.SeriesRef, lset labels.Labels, t, ct int64) (storage.SeriesRef, error) { if ct >= t { - return 0, fmt.Errorf("CT is newer or the same as sample's timestamp, ignoring") + return 0, storage.ErrCTNewerThanSample } s := a.head.series.getByID(chunks.HeadSeriesRef(ref)) @@ -747,6 +757,107 @@ func (a *headAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels return storage.SeriesRef(s.ref), nil } +func (a *headAppender) AppendHistogramCTZeroSample(ref storage.SeriesRef, lset labels.Labels, t, ct int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) { + if !a.head.opts.EnableNativeHistograms.Load() { + return 0, storage.ErrNativeHistogramsDisabled + } + + if ct >= t { + return 0, storage.ErrCTNewerThanSample + } + s := a.head.series.getByID(chunks.HeadSeriesRef(ref)) + if s == nil { + // Ensure no empty labels have gotten through. + lset = lset.WithoutEmpty() + if lset.IsEmpty() { + return 0, fmt.Errorf("empty labelset: %w", ErrInvalidSample) + } + + if l, dup := lset.HasDuplicateLabelNames(); dup { + return 0, fmt.Errorf(`label name "%s" is not unique: %w`, l, ErrInvalidSample) + } + + var created bool + var err error + s, created, err = a.head.getOrCreate(lset.Hash(), lset) + if err != nil { + return 0, err + } + if created { + switch { + case h != nil: + s.lastHistogramValue = &histogram.Histogram{} + case fh != nil: + s.lastFloatHistogramValue = &histogram.FloatHistogram{} + } + a.series = append(a.series, record.RefSeries{ + Ref: s.ref, + Labels: lset, + }) + } + } + + switch { + case h != nil: + zeroHistogram := &histogram.Histogram{} + s.Lock() + // Although we call `appendableHistogram` with oooHistogramsEnabled=true, for CTZeroSamples OOO is not allowed. + // We set it to true to make this implementation as close as possible to the float implementation. 
+ isOOO, _, err := s.appendableHistogram(ct, zeroHistogram, a.headMaxt, a.minValidTime, a.oooTimeWindow, true) + if err != nil { + s.Unlock() + if errors.Is(err, storage.ErrOutOfOrderSample) { + return 0, storage.ErrOutOfOrderCT + } + } + // OOO is not allowed because after the first scrape, CT will be the same for most (if not all) future samples. + // This is to prevent the injected zero from being marked as OOO forever. + if isOOO { + s.Unlock() + return 0, storage.ErrOutOfOrderCT + } + s.pendingCommit = true + s.Unlock() + a.histograms = append(a.histograms, record.RefHistogramSample{ + Ref: s.ref, + T: ct, + H: zeroHistogram, + }) + a.histogramSeries = append(a.histogramSeries, s) + case fh != nil: + zeroFloatHistogram := &histogram.FloatHistogram{} + s.Lock() + // Although we call `appendableFloatHistogram` with oooHistogramsEnabled=true, for CTZeroSamples OOO is not allowed. + // We set it to true to make this implementation as close as possible to the float implementation. + isOOO, _, err := s.appendableFloatHistogram(ct, zeroFloatHistogram, a.headMaxt, a.minValidTime, a.oooTimeWindow, true) // OOO is not allowed for CTZeroSamples. + if err != nil { + s.Unlock() + if errors.Is(err, storage.ErrOutOfOrderSample) { + return 0, storage.ErrOutOfOrderCT + } + } + // OOO is not allowed because after the first scrape, CT will be the same for most (if not all) future samples. + // This is to prevent the injected zero from being marked as OOO forever. 
+ if isOOO { + s.Unlock() + return 0, storage.ErrOutOfOrderCT + } + s.pendingCommit = true + s.Unlock() + a.floatHistograms = append(a.floatHistograms, record.RefFloatHistogramSample{ + Ref: s.ref, + T: ct, + FH: zeroFloatHistogram, + }) + a.floatHistogramSeries = append(a.floatHistogramSeries, s) + } + + if ct > a.maxt { + a.maxt = ct + } + return storage.SeriesRef(s.ref), nil +} + // UpdateMetadata for headAppender assumes the series ref already exists, and so it doesn't // use getOrCreate or make any of the lset sanity checks that Append does. func (a *headAppender) UpdateMetadata(ref storage.SeriesRef, lset labels.Labels, meta metadata.Metadata) (storage.SeriesRef, error) { diff --git a/tsdb/head_test.go b/tsdb/head_test.go index 483121dc66..ebfd1ff8b4 100644 --- a/tsdb/head_test.go +++ b/tsdb/head_test.go @@ -6363,6 +6363,166 @@ func TestHeadAppender_AppendCTZeroSample(t *testing.T) { } } +func TestHeadAppender_AppendHistogramCTZeroSample(t *testing.T) { + testHistogram := tsdbutil.GenerateTestHistogram(1) + testFloatHistogram := tsdbutil.GenerateTestFloatHistogram(1) + lbls := labels.FromStrings("foo", "bar") + type appendableHistograms struct { + ts int64 + h *histogram.Histogram + fh *histogram.FloatHistogram + ct int64 + } + for _, tc := range []struct { + name string + appendableHistograms []appendableHistograms + expectedHistograms []chunks.Sample + }{ + { + name: "In order ct+normal sample/histogram", + appendableHistograms: []appendableHistograms{ + {ts: 100, h: testHistogram, ct: 1}, + {ts: 101, h: testHistogram, ct: 1}, + }, + expectedHistograms: func() []chunks.Sample { + hNoCounterReset := *testHistogram + hNoCounterReset.CounterResetHint = histogram.NotCounterReset + return []chunks.Sample{ + sample{t: 1, h: &histogram.Histogram{}}, + sample{t: 100, h: testHistogram}, + sample{t: 101, h: &hNoCounterReset}, + } + }(), + }, + { + name: "In order ct+normal sample/floathistogram", + appendableHistograms: []appendableHistograms{ + {ts: 100, fh: 
testFloatHistogram, ct: 1}, + {ts: 101, fh: testFloatHistogram, ct: 1}, + }, + expectedHistograms: func() []chunks.Sample { + fhNoCounterReset := *testFloatHistogram + fhNoCounterReset.CounterResetHint = histogram.NotCounterReset + return []chunks.Sample{ + sample{t: 1, fh: &histogram.FloatHistogram{}}, + sample{t: 100, fh: testFloatHistogram}, + sample{t: 101, fh: &fhNoCounterReset}, + } + }(), + }, + { + name: "Consecutive appends with same ct ignore ct/histogram", + appendableHistograms: []appendableHistograms{ + {ts: 100, h: testHistogram, ct: 1}, + {ts: 101, h: testHistogram, ct: 1}, + }, + expectedHistograms: func() []chunks.Sample { + hNoCounterReset := *testHistogram + hNoCounterReset.CounterResetHint = histogram.NotCounterReset + return []chunks.Sample{ + sample{t: 1, h: &histogram.Histogram{}}, + sample{t: 100, h: testHistogram}, + sample{t: 101, h: &hNoCounterReset}, + } + }(), + }, + { + name: "Consecutive appends with same ct ignore ct/floathistogram", + appendableHistograms: []appendableHistograms{ + {ts: 100, fh: testFloatHistogram, ct: 1}, + {ts: 101, fh: testFloatHistogram, ct: 1}, + }, + expectedHistograms: func() []chunks.Sample { + fhNoCounterReset := *testFloatHistogram + fhNoCounterReset.CounterResetHint = histogram.NotCounterReset + return []chunks.Sample{ + sample{t: 1, fh: &histogram.FloatHistogram{}}, + sample{t: 100, fh: testFloatHistogram}, + sample{t: 101, fh: &fhNoCounterReset}, + } + }(), + }, + { + name: "Consecutive appends with newer ct do not ignore ct/histogram", + appendableHistograms: []appendableHistograms{ + {ts: 100, h: testHistogram, ct: 1}, + {ts: 102, h: testHistogram, ct: 101}, + }, + expectedHistograms: []chunks.Sample{ + sample{t: 1, h: &histogram.Histogram{}}, + sample{t: 100, h: testHistogram}, + sample{t: 101, h: &histogram.Histogram{CounterResetHint: histogram.CounterReset}}, + sample{t: 102, h: testHistogram}, + }, + }, + { + name: "Consecutive appends with newer ct do not ignore ct/floathistogram", + 
appendableHistograms: []appendableHistograms{ + {ts: 100, fh: testFloatHistogram, ct: 1}, + {ts: 102, fh: testFloatHistogram, ct: 101}, + }, + expectedHistograms: []chunks.Sample{ + sample{t: 1, fh: &histogram.FloatHistogram{}}, + sample{t: 100, fh: testFloatHistogram}, + sample{t: 101, fh: &histogram.FloatHistogram{CounterResetHint: histogram.CounterReset}}, + sample{t: 102, fh: testFloatHistogram}, + }, + }, + { + name: "CT equals to previous sample timestamp is ignored/histogram", + appendableHistograms: []appendableHistograms{ + {ts: 100, h: testHistogram, ct: 1}, + {ts: 101, h: testHistogram, ct: 100}, + }, + expectedHistograms: func() []chunks.Sample { + hNoCounterReset := *testHistogram + hNoCounterReset.CounterResetHint = histogram.NotCounterReset + return []chunks.Sample{ + sample{t: 1, h: &histogram.Histogram{}}, + sample{t: 100, h: testHistogram}, + sample{t: 101, h: &hNoCounterReset}, + } + }(), + }, + { + name: "CT equals to previous sample timestamp is ignored/floathistogram", + appendableHistograms: []appendableHistograms{ + {ts: 100, fh: testFloatHistogram, ct: 1}, + {ts: 101, fh: testFloatHistogram, ct: 100}, + }, + expectedHistograms: func() []chunks.Sample { + fhNoCounterReset := *testFloatHistogram + fhNoCounterReset.CounterResetHint = histogram.NotCounterReset + return []chunks.Sample{ + sample{t: 1, fh: &histogram.FloatHistogram{}}, + sample{t: 100, fh: testFloatHistogram}, + sample{t: 101, fh: &fhNoCounterReset}, + } + }(), + }, + } { + t.Run(tc.name, func(t *testing.T) { + head, _ := newTestHead(t, DefaultBlockDuration, wlog.CompressionNone, false) + defer func() { + require.NoError(t, head.Close()) + }() + appender := head.Appender(context.Background()) + for _, sample := range tc.appendableHistograms { + ref, err := appender.AppendHistogramCTZeroSample(0, lbls, sample.ts, sample.ct, sample.h, sample.fh) + require.NoError(t, err) + _, err = appender.AppendHistogram(ref, lbls, sample.ts, sample.h, sample.fh) + require.NoError(t, err) + } + 
require.NoError(t, appender.Commit()) + + q, err := NewBlockQuerier(head, math.MinInt64, math.MaxInt64) + require.NoError(t, err) + result := query(t, q, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar")) + require.Equal(t, tc.expectedHistograms, result[`{foo="bar"}`]) + }) + } +} + func TestHeadCompactableDoesNotCompactEmptyHead(t *testing.T) { // Use a chunk range of 1 here so that if we attempted to determine if the head // was compactable using default values for min and max times, `Head.compactable()` From 95a53ef982135de8faf9ba2757143e31dc0490f4 Mon Sep 17 00:00:00 2001 From: Arthur Silva Sens Date: Wed, 25 Sep 2024 16:17:20 -0300 Subject: [PATCH 057/137] Join tests for appending float and histogram CTs Signed-off-by: Arthur Silva Sens --- tsdb/head_test.go | 197 +++++++++++++++++++++------------------------- 1 file changed, 88 insertions(+), 109 deletions(-) diff --git a/tsdb/head_test.go b/tsdb/head_test.go index ebfd1ff8b4..8c401bc6f9 100644 --- a/tsdb/head_test.go +++ b/tsdb/head_test.go @@ -6281,11 +6281,15 @@ func TestHeadAppender_AppendFloatWithSameTimestampAsPreviousHistogram(t *testing require.ErrorIs(t, err, storage.NewDuplicateHistogramToFloatErr(2_000, 10.0)) } -func TestHeadAppender_AppendCTZeroSample(t *testing.T) { +func TestHeadAppender_AppendCT(t *testing.T) { + testHistogram := tsdbutil.GenerateTestHistogram(1) + testFloatHistogram := tsdbutil.GenerateTestFloatHistogram(1) type appendableSamples struct { - ts int64 - val float64 - ct int64 + ts int64 + fSample float64 + h *histogram.Histogram + fh *histogram.FloatHistogram + ct int64 } for _, tc := range []struct { name string @@ -6293,20 +6297,10 @@ func TestHeadAppender_AppendCTZeroSample(t *testing.T) { expectedSamples []chunks.Sample }{ { - name: "In order ct+normal sample", + name: "In order ct+normal sample/floatSample", appendableSamples: []appendableSamples{ - {ts: 100, val: 10, ct: 1}, - }, - expectedSamples: []chunks.Sample{ - sample{t: 1, f: 0}, - sample{t: 100, f: 10}, - }, 
- }, - { - name: "Consecutive appends with same ct ignore ct", - appendableSamples: []appendableSamples{ - {ts: 100, val: 10, ct: 1}, - {ts: 101, val: 10, ct: 1}, + {ts: 100, fSample: 10, ct: 1}, + {ts: 101, fSample: 10, ct: 1}, }, expectedSamples: []chunks.Sample{ sample{t: 1, f: 0}, @@ -6314,77 +6308,13 @@ func TestHeadAppender_AppendCTZeroSample(t *testing.T) { sample{t: 101, f: 10}, }, }, - { - name: "Consecutive appends with newer ct do not ignore ct", - appendableSamples: []appendableSamples{ - {ts: 100, val: 10, ct: 1}, - {ts: 102, val: 10, ct: 101}, - }, - expectedSamples: []chunks.Sample{ - sample{t: 1, f: 0}, - sample{t: 100, f: 10}, - sample{t: 101, f: 0}, - sample{t: 102, f: 10}, - }, - }, - { - name: "CT equals to previous sample timestamp is ignored", - appendableSamples: []appendableSamples{ - {ts: 100, val: 10, ct: 1}, - {ts: 101, val: 10, ct: 100}, - }, - expectedSamples: []chunks.Sample{ - sample{t: 1, f: 0}, - sample{t: 100, f: 10}, - sample{t: 101, f: 10}, - }, - }, - } { - t.Run(tc.name, func(t *testing.T) { - h, _ := newTestHead(t, DefaultBlockDuration, wlog.CompressionNone, false) - defer func() { - require.NoError(t, h.Close()) - }() - a := h.Appender(context.Background()) - lbls := labels.FromStrings("foo", "bar") - for _, sample := range tc.appendableSamples { - _, err := a.AppendCTZeroSample(0, lbls, sample.ts, sample.ct) - require.NoError(t, err) - _, err = a.Append(0, lbls, sample.ts, sample.val) - require.NoError(t, err) - } - require.NoError(t, a.Commit()) - - q, err := NewBlockQuerier(h, math.MinInt64, math.MaxInt64) - require.NoError(t, err) - result := query(t, q, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar")) - require.Equal(t, tc.expectedSamples, result[`{foo="bar"}`]) - }) - } -} - -func TestHeadAppender_AppendHistogramCTZeroSample(t *testing.T) { - testHistogram := tsdbutil.GenerateTestHistogram(1) - testFloatHistogram := tsdbutil.GenerateTestFloatHistogram(1) - lbls := labels.FromStrings("foo", "bar") - type 
appendableHistograms struct { - ts int64 - h *histogram.Histogram - fh *histogram.FloatHistogram - ct int64 - } - for _, tc := range []struct { - name string - appendableHistograms []appendableHistograms - expectedHistograms []chunks.Sample - }{ { name: "In order ct+normal sample/histogram", - appendableHistograms: []appendableHistograms{ + appendableSamples: []appendableSamples{ {ts: 100, h: testHistogram, ct: 1}, {ts: 101, h: testHistogram, ct: 1}, }, - expectedHistograms: func() []chunks.Sample { + expectedSamples: func() []chunks.Sample { hNoCounterReset := *testHistogram hNoCounterReset.CounterResetHint = histogram.NotCounterReset return []chunks.Sample{ @@ -6396,11 +6326,11 @@ func TestHeadAppender_AppendHistogramCTZeroSample(t *testing.T) { }, { name: "In order ct+normal sample/floathistogram", - appendableHistograms: []appendableHistograms{ + appendableSamples: []appendableSamples{ {ts: 100, fh: testFloatHistogram, ct: 1}, {ts: 101, fh: testFloatHistogram, ct: 1}, }, - expectedHistograms: func() []chunks.Sample { + expectedSamples: func() []chunks.Sample { fhNoCounterReset := *testFloatHistogram fhNoCounterReset.CounterResetHint = histogram.NotCounterReset return []chunks.Sample{ @@ -6410,13 +6340,25 @@ func TestHeadAppender_AppendHistogramCTZeroSample(t *testing.T) { } }(), }, + { + name: "Consecutive appends with same ct ignore ct/floatSample", + appendableSamples: []appendableSamples{ + {ts: 100, fSample: 10, ct: 1}, + {ts: 101, fSample: 10, ct: 1}, + }, + expectedSamples: []chunks.Sample{ + sample{t: 1, f: 0}, + sample{t: 100, f: 10}, + sample{t: 101, f: 10}, + }, + }, { name: "Consecutive appends with same ct ignore ct/histogram", - appendableHistograms: []appendableHistograms{ + appendableSamples: []appendableSamples{ {ts: 100, h: testHistogram, ct: 1}, {ts: 101, h: testHistogram, ct: 1}, }, - expectedHistograms: func() []chunks.Sample { + expectedSamples: func() []chunks.Sample { hNoCounterReset := *testHistogram hNoCounterReset.CounterResetHint = 
histogram.NotCounterReset return []chunks.Sample{ @@ -6428,11 +6370,11 @@ func TestHeadAppender_AppendHistogramCTZeroSample(t *testing.T) { }, { name: "Consecutive appends with same ct ignore ct/floathistogram", - appendableHistograms: []appendableHistograms{ + appendableSamples: []appendableSamples{ {ts: 100, fh: testFloatHistogram, ct: 1}, {ts: 101, fh: testFloatHistogram, ct: 1}, }, - expectedHistograms: func() []chunks.Sample { + expectedSamples: func() []chunks.Sample { fhNoCounterReset := *testFloatHistogram fhNoCounterReset.CounterResetHint = histogram.NotCounterReset return []chunks.Sample{ @@ -6442,13 +6384,26 @@ func TestHeadAppender_AppendHistogramCTZeroSample(t *testing.T) { } }(), }, + { + name: "Consecutive appends with newer ct do not ignore ct/floatSample", + appendableSamples: []appendableSamples{ + {ts: 100, fSample: 10, ct: 1}, + {ts: 102, fSample: 10, ct: 101}, + }, + expectedSamples: []chunks.Sample{ + sample{t: 1, f: 0}, + sample{t: 100, f: 10}, + sample{t: 101, f: 0}, + sample{t: 102, f: 10}, + }, + }, { name: "Consecutive appends with newer ct do not ignore ct/histogram", - appendableHistograms: []appendableHistograms{ + appendableSamples: []appendableSamples{ {ts: 100, h: testHistogram, ct: 1}, {ts: 102, h: testHistogram, ct: 101}, }, - expectedHistograms: []chunks.Sample{ + expectedSamples: []chunks.Sample{ sample{t: 1, h: &histogram.Histogram{}}, sample{t: 100, h: testHistogram}, sample{t: 101, h: &histogram.Histogram{CounterResetHint: histogram.CounterReset}}, @@ -6457,24 +6412,36 @@ func TestHeadAppender_AppendHistogramCTZeroSample(t *testing.T) { }, { name: "Consecutive appends with newer ct do not ignore ct/floathistogram", - appendableHistograms: []appendableHistograms{ + appendableSamples: []appendableSamples{ {ts: 100, fh: testFloatHistogram, ct: 1}, {ts: 102, fh: testFloatHistogram, ct: 101}, }, - expectedHistograms: []chunks.Sample{ + expectedSamples: []chunks.Sample{ sample{t: 1, fh: &histogram.FloatHistogram{}}, sample{t: 100, 
fh: testFloatHistogram}, sample{t: 101, fh: &histogram.FloatHistogram{CounterResetHint: histogram.CounterReset}}, sample{t: 102, fh: testFloatHistogram}, }, }, + { + name: "CT equals to previous sample timestamp is ignored/floatSample", + appendableSamples: []appendableSamples{ + {ts: 100, fSample: 10, ct: 1}, + {ts: 101, fSample: 10, ct: 100}, + }, + expectedSamples: []chunks.Sample{ + sample{t: 1, f: 0}, + sample{t: 100, f: 10}, + sample{t: 101, f: 10}, + }, + }, { name: "CT equals to previous sample timestamp is ignored/histogram", - appendableHistograms: []appendableHistograms{ + appendableSamples: []appendableSamples{ {ts: 100, h: testHistogram, ct: 1}, {ts: 101, h: testHistogram, ct: 100}, }, - expectedHistograms: func() []chunks.Sample { + expectedSamples: func() []chunks.Sample { hNoCounterReset := *testHistogram hNoCounterReset.CounterResetHint = histogram.NotCounterReset return []chunks.Sample{ @@ -6486,11 +6453,11 @@ func TestHeadAppender_AppendHistogramCTZeroSample(t *testing.T) { }, { name: "CT equals to previous sample timestamp is ignored/floathistogram", - appendableHistograms: []appendableHistograms{ + appendableSamples: []appendableSamples{ {ts: 100, fh: testFloatHistogram, ct: 1}, {ts: 101, fh: testFloatHistogram, ct: 100}, }, - expectedHistograms: func() []chunks.Sample { + expectedSamples: func() []chunks.Sample { fhNoCounterReset := *testFloatHistogram fhNoCounterReset.CounterResetHint = histogram.NotCounterReset return []chunks.Sample{ @@ -6502,23 +6469,35 @@ func TestHeadAppender_AppendHistogramCTZeroSample(t *testing.T) { }, } { t.Run(tc.name, func(t *testing.T) { - head, _ := newTestHead(t, DefaultBlockDuration, wlog.CompressionNone, false) + h, _ := newTestHead(t, DefaultBlockDuration, wlog.CompressionNone, false) defer func() { - require.NoError(t, head.Close()) + require.NoError(t, h.Close()) }() - appender := head.Appender(context.Background()) - for _, sample := range tc.appendableHistograms { - ref, err := 
appender.AppendHistogramCTZeroSample(0, lbls, sample.ts, sample.ct, sample.h, sample.fh) - require.NoError(t, err) - _, err = appender.AppendHistogram(ref, lbls, sample.ts, sample.h, sample.fh) - require.NoError(t, err) - } - require.NoError(t, appender.Commit()) + a := h.Appender(context.Background()) + lbls := labels.FromStrings("foo", "bar") + for _, sample := range tc.appendableSamples { + // Append float if it's a float test case + if sample.fSample != 0 { + _, err := a.AppendCTZeroSample(0, lbls, sample.ts, sample.ct) + require.NoError(t, err) + _, err = a.Append(0, lbls, sample.ts, sample.fSample) + require.NoError(t, err) + } - q, err := NewBlockQuerier(head, math.MinInt64, math.MaxInt64) + // Append histograms if it's a histogram test case + if sample.h != nil || sample.fh != nil { + ref, err := a.AppendHistogramCTZeroSample(0, lbls, sample.ts, sample.ct, sample.h, sample.fh) + require.NoError(t, err) + _, err = a.AppendHistogram(ref, lbls, sample.ts, sample.h, sample.fh) + require.NoError(t, err) + } + } + require.NoError(t, a.Commit()) + + q, err := NewBlockQuerier(h, math.MinInt64, math.MaxInt64) require.NoError(t, err) result := query(t, q, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar")) - require.Equal(t, tc.expectedHistograms, result[`{foo="bar"}`]) + require.Equal(t, tc.expectedSamples, result[`{foo="bar"}`]) }) } } From 5710ddf24fd9a15710b391400aae4e4cc426e97a Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Thu, 26 Sep 2024 15:32:18 +0100 Subject: [PATCH 058/137] [ENHANCEMENT] Alerts: remove metrics for removed Alertmanagers (#13909) * [ENHANCEMENT] Alerts: remove metrics for removed Alertmanagers So they don't continue to report stale values. 
Signed-off-by: Bryan Boreham --- notifier/notifier.go | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/notifier/notifier.go b/notifier/notifier.go index 218e4cb8c7..5374e73d62 100644 --- a/notifier/notifier.go +++ b/notifier/notifier.go @@ -770,6 +770,7 @@ func (s *alertmanagerSet) sync(tgs []*targetgroup.Group) { s.mtx.Lock() defer s.mtx.Unlock() + previousAms := s.ams // Set new Alertmanagers and deduplicate them along their unique URL. s.ams = []alertmanager{} s.droppedAms = []alertmanager{} @@ -789,6 +790,17 @@ func (s *alertmanagerSet) sync(tgs []*targetgroup.Group) { seen[us] = struct{}{} s.ams = append(s.ams, am) } + // Now remove counters for any removed Alertmanagers. + for _, am := range previousAms { + us := am.url().String() + if _, ok := seen[us]; ok { + continue + } + s.metrics.latency.DeleteLabelValues(us) + s.metrics.sent.DeleteLabelValues(us) + s.metrics.errors.DeleteLabelValues(us) + seen[us] = struct{}{} + } } func postPath(pre string, v config.AlertmanagerAPIVersion) string { From 79a6238e195ecc1c20937036c1e3b4e3bdaddc49 Mon Sep 17 00:00:00 2001 From: George Krajcsovits Date: Thu, 26 Sep 2024 18:35:15 +0200 Subject: [PATCH 059/137] scrape/scrape_test.go: reduce the time it takes to reload the manager (#14447) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * scrape/scrape_test.go: reduce the time it takes to reload the manager TestNativeHistogramMaxSchemaSet took over 3x5s to complete because there's a minimum reload interval. I've made the testcases run in parallel and reduced the reload interval to 10ms. Now the test runs in around 0.1-0.2 seconds. Ran test 10000 times to check if it's flaky. 
Signed-off-by: György Krajcsovits --------- Signed-off-by: György Krajcsovits --- scrape/scrape_test.go | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/scrape/scrape_test.go b/scrape/scrape_test.go index 9e49fe8efa..04fd536012 100644 --- a/scrape/scrape_test.go +++ b/scrape/scrape_test.go @@ -3773,7 +3773,9 @@ func TestNativeHistogramMaxSchemaSet(t *testing.T) { }, } for name, tc := range testcases { + tc := tc t.Run(name, func(t *testing.T) { + t.Parallel() testNativeHistogramMaxSchemaSet(t, tc.minBucketFactor, tc.expectedSchema) }) } @@ -3816,8 +3818,8 @@ func testNativeHistogramMaxSchemaSet(t *testing.T, minBucketFactor string, expec configStr := fmt.Sprintf(` global: metric_name_validation_scheme: legacy - scrape_interval: 1s - scrape_timeout: 1s + scrape_interval: 50ms + scrape_timeout: 25ms scrape_configs: - job_name: test %s @@ -3830,7 +3832,7 @@ scrape_configs: s.DB.EnableNativeHistograms() reg := prometheus.NewRegistry() - mng, err := NewManager(&Options{EnableNativeHistogramsIngestion: true}, nil, nil, s, reg) + mng, err := NewManager(&Options{DiscoveryReloadInterval: model.Duration(10 * time.Millisecond), EnableNativeHistogramsIngestion: true}, nil, nil, s, reg) require.NoError(t, err) cfg, err := config.Load(configStr, false, log.NewNopLogger()) require.NoError(t, err) @@ -3861,7 +3863,7 @@ scrape_configs: countSeries++ } return countSeries > 0 - }, 15*time.Second, 100*time.Millisecond) + }, 5*time.Second, 100*time.Millisecond) // Check that native histogram schema is as expected. 
q, err := s.Querier(0, math.MaxInt64) From 8aef821e1002a3c4425f7c850b2ae9e69e28bb1e Mon Sep 17 00:00:00 2001 From: Neeraj Gartia Date: Fri, 27 Sep 2024 01:10:00 +0530 Subject: [PATCH 060/137] eval_info command Signed-off-by: Neeraj Gartia --- promql/promqltest/test.go | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/promql/promqltest/test.go b/promql/promqltest/test.go index f0649a77a8..642b47444a 100644 --- a/promql/promqltest/test.go +++ b/promql/promqltest/test.go @@ -46,8 +46,8 @@ import ( var ( patSpace = regexp.MustCompile("[\t ]+") patLoad = regexp.MustCompile(`^load(?:_(with_nhcb))?\s+(.+?)$`) - patEvalInstant = regexp.MustCompile(`^eval(?:_(fail|warn|ordered))?\s+instant\s+(?:at\s+(.+?))?\s+(.+)$`) - patEvalRange = regexp.MustCompile(`^eval(?:_(fail|warn))?\s+range\s+from\s+(.+)\s+to\s+(.+)\s+step\s+(.+?)\s+(.+)$`) + patEvalInstant = regexp.MustCompile(`^eval(?:_(fail|warn|ordered|info))?\s+instant\s+(?:at\s+(.+?))?\s+(.+)$`) + patEvalRange = regexp.MustCompile(`^eval(?:_(fail|warn|info))?\s+range\s+from\s+(.+)\s+to\s+(.+)\s+step\s+(.+?)\s+(.+)$`) ) const ( @@ -321,6 +321,8 @@ func (t *test) parseEval(lines []string, i int) (int, *evalCmd, error) { cmd.fail = true case "warn": cmd.warn = true + case "info": + cmd.info = true } for j := 1; i+1 < len(lines); j++ { @@ -657,10 +659,10 @@ type evalCmd struct { step time.Duration line int - isRange bool // if false, instant query - fail, warn, ordered bool - expectedFailMessage string - expectedFailRegexp *regexp.Regexp + isRange bool // if false, instant query + fail, warn, ordered, info bool + expectedFailMessage string + expectedFailRegexp *regexp.Regexp metrics map[uint64]labels.Labels expectScalar bool @@ -1208,13 +1210,16 @@ func (t *test) runInstantQuery(iq atModifierTestCase, cmd *evalCmd, engine promq if res.Err == nil && cmd.fail { return fmt.Errorf("expected error evaluating query %q (line %d) but got none", iq.expr, cmd.line) } - countWarnings, _ := 
res.Warnings.CountWarningsAndInfo() + countWarnings, countInfo := res.Warnings.CountWarningsAndInfo() if !cmd.warn && countWarnings > 0 { return fmt.Errorf("unexpected warnings evaluating query %q (line %d): %v", iq.expr, cmd.line, res.Warnings) } if cmd.warn && countWarnings == 0 { return fmt.Errorf("expected warnings evaluating query %q (line %d) but got none", iq.expr, cmd.line) } + if cmd.info && countInfo == 0 { + return fmt.Errorf("expected info annotations evaluating query %q (line %d) but got none", iq.expr, cmd.line) + } err = cmd.compareResult(res.Value) if err != nil { return fmt.Errorf("error in %s %s (line %d): %w", cmd, iq.expr, cmd.line, err) From 410fcce6f0484d59184e5c5c0dd13ea454cd2f9e Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Fri, 27 Sep 2024 07:45:49 +0100 Subject: [PATCH 061/137] Remove unnecessary pprof import (#14988) The pattern of `import _ "net/http/pprof"` adds handlers to the default http handler, but Prometheus does not use that. There are explicit handlers in `web/web.go`. So, we can remove this line with no impact to behaviour. Signed-off-by: Bryan Boreham --- cmd/prometheus/main.go | 1 - 1 file changed, 1 deletion(-) diff --git a/cmd/prometheus/main.go b/cmd/prometheus/main.go index 2a96a6ba7a..0e05b843d0 100644 --- a/cmd/prometheus/main.go +++ b/cmd/prometheus/main.go @@ -22,7 +22,6 @@ import ( "math/bits" "net" "net/http" - _ "net/http/pprof" // Comment this line to disable pprof endpoint. 
"net/url" "os" "os/signal" From ada8a6ef10c37ec0ea37b2e0c21e4ec2187a6fa8 Mon Sep 17 00:00:00 2001 From: Oleg Zaytsev Date: Fri, 27 Sep 2024 10:14:39 +0200 Subject: [PATCH 062/137] Add some more tests for MemPostings_Delete Signed-off-by: Oleg Zaytsev --- tsdb/index/postings_test.go | 86 +++++++++++++++++++++++++------------ 1 file changed, 59 insertions(+), 27 deletions(-) diff --git a/tsdb/index/postings_test.go b/tsdb/index/postings_test.go index 1802c9e891..b41fb54e65 100644 --- a/tsdb/index/postings_test.go +++ b/tsdb/index/postings_test.go @@ -973,37 +973,69 @@ func TestMemPostingsStats(t *testing.T) { } func TestMemPostings_Delete(t *testing.T) { - p := NewMemPostings() - p.Add(1, labels.FromStrings("lbl1", "a")) - p.Add(2, labels.FromStrings("lbl1", "b")) - p.Add(3, labels.FromStrings("lbl2", "a")) + t.Run("some postings", func(t *testing.T) { + p := NewMemPostings() + p.Add(1, labels.FromStrings("lbl1", "a")) + p.Add(2, labels.FromStrings("lbl1", "b")) + p.Add(3, labels.FromStrings("lbl2", "a")) - before := p.Get(allPostingsKey.Name, allPostingsKey.Value) - deletedRefs := map[storage.SeriesRef]struct{}{ - 2: {}, - } - affectedLabels := map[labels.Label]struct{}{ - {Name: "lbl1", Value: "b"}: {}, - } - p.Delete(deletedRefs, affectedLabels) - after := p.Get(allPostingsKey.Name, allPostingsKey.Value) + before := p.Get(allPostingsKey.Name, allPostingsKey.Value) + deletedRefs := map[storage.SeriesRef]struct{}{ + 2: {}, + } + affectedLabels := map[labels.Label]struct{}{ + {Name: "lbl1", Value: "b"}: {}, + } + p.Delete(deletedRefs, affectedLabels) + after := p.Get(allPostingsKey.Name, allPostingsKey.Value) - // Make sure postings gotten before the delete have the old data when - // iterated over. - expanded, err := ExpandPostings(before) - require.NoError(t, err) - require.Equal(t, []storage.SeriesRef{1, 2, 3}, expanded) + // Make sure postings gotten before the delete have the old data when + // iterated over. 
+ expanded, err := ExpandPostings(before) + require.NoError(t, err) + require.Equal(t, []storage.SeriesRef{1, 2, 3}, expanded) - // Make sure postings gotten after the delete have the new data when - // iterated over. - expanded, err = ExpandPostings(after) - require.NoError(t, err) - require.Equal(t, []storage.SeriesRef{1, 3}, expanded) + // Make sure postings gotten after the delete have the new data when + // iterated over. + expanded, err = ExpandPostings(after) + require.NoError(t, err) + require.Equal(t, []storage.SeriesRef{1, 3}, expanded) - deleted := p.Get("lbl1", "b") - expanded, err = ExpandPostings(deleted) - require.NoError(t, err) - require.Empty(t, expanded, "expected empty postings, got %v", expanded) + deleted := p.Get("lbl1", "b") + expanded, err = ExpandPostings(deleted) + require.NoError(t, err) + require.Empty(t, expanded, "expected empty postings, got %v", expanded) + }) + + t.Run("all postings", func(t *testing.T) { + p := NewMemPostings() + p.Add(1, labels.FromStrings("lbl1", "a")) + p.Add(2, labels.FromStrings("lbl1", "b")) + p.Add(3, labels.FromStrings("lbl2", "a")) + + deletedRefs := map[storage.SeriesRef]struct{}{1: {}, 2: {}, 3: {}} + affectedLabels := map[labels.Label]struct{}{ + {Name: "lbl1", Value: "a"}: {}, + {Name: "lbl1", Value: "b"}: {}, + {Name: "lbl1", Value: "c"}: {}, + } + p.Delete(deletedRefs, affectedLabels) + after := p.Get(allPostingsKey.Name, allPostingsKey.Value) + expanded, err := ExpandPostings(after) + require.NoError(t, err) + require.Empty(t, expanded) + }) + + t.Run("nothing on empty mempostings", func(t *testing.T) { + p := NewMemPostings() + deletedRefs := map[storage.SeriesRef]struct{}{} + affectedLabels := map[labels.Label]struct{}{} + p.Delete(deletedRefs, affectedLabels) + after := p.Get(allPostingsKey.Name, allPostingsKey.Value) + expanded, err := ExpandPostings(after) + require.NoError(t, err) + require.Empty(t, expanded) + }) } // BenchmarkMemPostings_Delete is quite heavy, so consider running it with 
From b8e5b7cda9f9c589a3d4d9cb356a7fcf77d9cf17 Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Fri, 27 Sep 2024 11:20:45 +0100 Subject: [PATCH 063/137] [REFACTOR] PromQL: remove label_join and label_replace stubs These functions operate on whole series, not on samples, so they do not fit into the table of functions that return a Vector. Remove the stub entries that were left to help downstream users of the code identify what changed. We cannot remove the entries from the `FunctionCalls` map without breaking `TestFunctionList`, so put some nils in to keep it happy. Signed-off-by: Bryan Boreham --- promql/functions.go | 14 ++------------ 1 file changed, 2 insertions(+), 12 deletions(-) diff --git a/promql/functions.go b/promql/functions.go index c4a7ee4a46..4333cb5ce0 100644 --- a/promql/functions.go +++ b/promql/functions.go @@ -1514,11 +1514,6 @@ func (ev *evaluator) evalLabelReplace(ctx context.Context, args parser.Expressio return matrix, ws } -// === label_replace(Vector parser.ValueTypeVector, dst_label, replacement, src_labelname, regex parser.ValueTypeString) (Vector, Annotations) === -func funcLabelReplace(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - panic("funcLabelReplace wrong implementation called") -} - // === Vector(s Scalar) (Vector, Annotations) === func funcVector(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return append(enh.Out, @@ -1570,11 +1565,6 @@ func (ev *evaluator) evalLabelJoin(ctx context.Context, args parser.Expressions) return matrix, ws } -// === label_join(vector model.ValVector, dest_labelname, separator, src_labelname...) (Vector, Annotations) === -func funcLabelJoin(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - panic("funcLabelReplace wrong implementation called") -} - // Common code for date related functions. 
func dateWrapper(vals []parser.Value, enh *EvalNodeHelper, f func(time.Time) float64) Vector { if len(vals) == 0 { @@ -1696,8 +1686,8 @@ var FunctionCalls = map[string]FunctionCall{ "idelta": funcIdelta, "increase": funcIncrease, "irate": funcIrate, - "label_replace": funcLabelReplace, - "label_join": funcLabelJoin, + "label_replace": nil, // evalLabelReplace not called via this map. + "label_join": nil, // evalLabelJoin not called via this map. "ln": funcLn, "log10": funcLog10, "log2": funcLog2, From 6cde0096e21b5852d9224f4f83ac630394038440 Mon Sep 17 00:00:00 2001 From: Julien Date: Fri, 20 Sep 2024 13:29:34 +0200 Subject: [PATCH 064/137] Add notifications to the web UI when configuration reload fails. This commit introduces a new `/api/v1/notifications/live` endpoint that utilizes Server-Sent Events (SSE) to stream notifications to the web UI. This is used to display alerts such as when a configuration reload has failed. I opted for SSE over WebSockets because SSE is simpler to implement and more robust for our use case. Since we only need one-way communication from the server to the client, SSE fits perfectly without the overhead of establishing and maintaining a two-way WebSocket connection. When the SSE connection fails, we go back to a classic /api/v1/notifications API endpoint. This commit also contains the required UI changes for the new Mantine UI. 
Signed-off-by: Julien --- cmd/prometheus/main.go | 29 ++- web/api/notifications.go | 176 ++++++++++++++++ web/api/notifications_test.go | 192 ++++++++++++++++++ web/api/v1/api.go | 105 +++++++--- web/api/v1/errors_test.go | 2 + web/ui/mantine-ui/src/App.tsx | 77 +++---- web/ui/mantine-ui/src/api/api.ts | 3 + .../src/api/responseTypes/notifications.ts | 8 + .../src/components/NotificationsIcon.tsx | 62 ++++++ .../src/components/NotificationsProvider.tsx | 61 ++++++ .../mantine-ui/src/state/useNotifications.ts | 17 ++ web/web.go | 5 + 12 files changed, 668 insertions(+), 69 deletions(-) create mode 100644 web/api/notifications.go create mode 100644 web/api/notifications_test.go create mode 100644 web/ui/mantine-ui/src/api/responseTypes/notifications.ts create mode 100644 web/ui/mantine-ui/src/components/NotificationsIcon.tsx create mode 100644 web/ui/mantine-ui/src/components/NotificationsProvider.tsx create mode 100644 web/ui/mantine-ui/src/state/useNotifications.ts diff --git a/cmd/prometheus/main.go b/cmd/prometheus/main.go index e7fd82e6f3..176ebd5b55 100644 --- a/cmd/prometheus/main.go +++ b/cmd/prometheus/main.go @@ -79,6 +79,7 @@ import ( "github.com/prometheus/prometheus/util/logging" prom_runtime "github.com/prometheus/prometheus/util/runtime" "github.com/prometheus/prometheus/web" + "github.com/prometheus/prometheus/web/api" ) var ( @@ -277,13 +278,17 @@ func main() { ) } + notifs := api.NewNotifications(prometheus.DefaultRegisterer) + cfg := flagConfig{ notifier: notifier.Options{ Registerer: prometheus.DefaultRegisterer, }, web: web.Options{ - Registerer: prometheus.DefaultRegisterer, - Gatherer: prometheus.DefaultGatherer, + Registerer: prometheus.DefaultRegisterer, + Gatherer: prometheus.DefaultGatherer, + NotificationsSub: notifs.Sub, + NotificationsGetter: notifs.Get, }, promlogConfig: promlog.Config{}, } @@ -1082,6 +1087,14 @@ func main() { } } + callback := func(success bool) { + if success { + 
notifs.DeleteNotification(api.ConfigurationUnsuccessful) + return + } + notifs.AddNotification(api.ConfigurationUnsuccessful) + } + g.Add( func() error { <-reloadReady.C @@ -1089,7 +1102,7 @@ func main() { for { select { case <-hup: - if err := reloadConfig(cfg.configFile, cfg.enableExpandExternalLabels, cfg.tsdb.EnableExemplarStorage, logger, noStepSubqueryInterval, reloaders...); err != nil { + if err := reloadConfig(cfg.configFile, cfg.enableExpandExternalLabels, cfg.tsdb.EnableExemplarStorage, logger, noStepSubqueryInterval, callback, reloaders...); err != nil { level.Error(logger).Log("msg", "Error reloading config", "err", err) } else if cfg.enableAutoReload { if currentChecksum, err := config.GenerateChecksum(cfg.configFile); err == nil { @@ -1099,7 +1112,7 @@ func main() { } } case rc := <-webHandler.Reload(): - if err := reloadConfig(cfg.configFile, cfg.enableExpandExternalLabels, cfg.tsdb.EnableExemplarStorage, logger, noStepSubqueryInterval, reloaders...); err != nil { + if err := reloadConfig(cfg.configFile, cfg.enableExpandExternalLabels, cfg.tsdb.EnableExemplarStorage, logger, noStepSubqueryInterval, callback, reloaders...); err != nil { level.Error(logger).Log("msg", "Error reloading config", "err", err) rc <- err } else { @@ -1124,7 +1137,7 @@ func main() { } level.Info(logger).Log("msg", "Configuration file change detected, reloading the configuration.") - if err := reloadConfig(cfg.configFile, cfg.enableExpandExternalLabels, cfg.tsdb.EnableExemplarStorage, logger, noStepSubqueryInterval, reloaders...); err != nil { + if err := reloadConfig(cfg.configFile, cfg.enableExpandExternalLabels, cfg.tsdb.EnableExemplarStorage, logger, noStepSubqueryInterval, callback, reloaders...); err != nil { level.Error(logger).Log("msg", "Error reloading config", "err", err) } else { checksum = currentChecksum @@ -1154,7 +1167,7 @@ func main() { return nil } - if err := reloadConfig(cfg.configFile, cfg.enableExpandExternalLabels, cfg.tsdb.EnableExemplarStorage, 
logger, noStepSubqueryInterval, reloaders...); err != nil { + if err := reloadConfig(cfg.configFile, cfg.enableExpandExternalLabels, cfg.tsdb.EnableExemplarStorage, logger, noStepSubqueryInterval, func(bool) {}, reloaders...); err != nil { return fmt.Errorf("error loading config from %q: %w", cfg.configFile, err) } @@ -1380,7 +1393,7 @@ type reloader struct { reloader func(*config.Config) error } -func reloadConfig(filename string, expandExternalLabels, enableExemplarStorage bool, logger log.Logger, noStepSuqueryInterval *safePromQLNoStepSubqueryInterval, rls ...reloader) (err error) { +func reloadConfig(filename string, expandExternalLabels, enableExemplarStorage bool, logger log.Logger, noStepSuqueryInterval *safePromQLNoStepSubqueryInterval, callback func(bool), rls ...reloader) (err error) { start := time.Now() timings := []interface{}{} level.Info(logger).Log("msg", "Loading configuration file", "filename", filename) @@ -1389,8 +1402,10 @@ func reloadConfig(filename string, expandExternalLabels, enableExemplarStorage b if err == nil { configSuccess.Set(1) configSuccessTime.SetToCurrentTime() + callback(true) } else { configSuccess.Set(0) + callback(false) } }() diff --git a/web/api/notifications.go b/web/api/notifications.go new file mode 100644 index 0000000000..47f29f6ebe --- /dev/null +++ b/web/api/notifications.go @@ -0,0 +1,176 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package api + +import ( + "sync" + "time" + + "github.com/prometheus/client_golang/prometheus" +) + +const ( + ConfigurationUnsuccessful = "Configuration reload has failed." +) + +// Notification represents an individual notification message. +type Notification struct { + Text string `json:"text"` + Date time.Time `json:"date"` + Active bool `json:"active"` +} + +// Notifications stores a list of Notification objects. +// It also manages live subscribers that receive notifications via channels. +type Notifications struct { + mu sync.Mutex + notifications []Notification + subscribers map[chan Notification]struct{} // Active subscribers. + + subscriberGauge prometheus.Gauge + notificationsSent prometheus.Counter + notificationsDropped prometheus.Counter +} + +// NewNotifications creates a new Notifications instance. +func NewNotifications(reg prometheus.Registerer) *Notifications { + n := &Notifications{ + subscribers: make(map[chan Notification]struct{}), + subscriberGauge: prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: "prometheus", + Subsystem: "api", + Name: "notification_active_subscribers", + Help: "The current number of active notification subscribers.", + }), + notificationsSent: prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: "prometheus", + Subsystem: "api", + Name: "notification_updates_sent_total", + Help: "Total number of notification updates sent.", + }), + notificationsDropped: prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: "prometheus", + Subsystem: "api", + Name: "notification_updates_dropped_total", + Help: "Total number of notification updates dropped.", + }), + } + + if reg != nil { + reg.MustRegister(n.subscriberGauge, n.notificationsSent, n.notificationsDropped) + } + + return n +} + +// AddNotification adds a new notification or updates the timestamp if it already exists. 
+func (n *Notifications) AddNotification(text string) { + n.mu.Lock() + defer n.mu.Unlock() + + for i, notification := range n.notifications { + if notification.Text == text { + n.notifications[i].Date = time.Now() + + n.notifySubscribers(n.notifications[i]) + return + } + } + + newNotification := Notification{ + Text: text, + Date: time.Now(), + Active: true, + } + n.notifications = append(n.notifications, newNotification) + + n.notifySubscribers(newNotification) +} + +// notifySubscribers sends a notification to all active subscribers. +func (n *Notifications) notifySubscribers(notification Notification) { + for sub := range n.subscribers { + // Non-blocking send to avoid subscriber blocking issues. + n.notificationsSent.Inc() + select { + case sub <- notification: + // Notification sent to the subscriber. + default: + // Drop the notification if the subscriber's channel is full. + n.notificationsDropped.Inc() + } + } +} + +// DeleteNotification removes the first notification that matches the provided text. +// The deleted notification is sent to subscribers with Active: false before being removed. +func (n *Notifications) DeleteNotification(text string) { + n.mu.Lock() + defer n.mu.Unlock() + + // Iterate through the notifications to find the matching text. + for i, notification := range n.notifications { + if notification.Text == text { + // Mark the notification as inactive and notify subscribers. + notification.Active = false + n.notifySubscribers(notification) + + // Remove the notification from the list. + n.notifications = append(n.notifications[:i], n.notifications[i+1:]...) + return + } + } +} + +// Get returns a copy of the list of notifications for safe access outside the struct. +func (n *Notifications) Get() []Notification { + n.mu.Lock() + defer n.mu.Unlock() + + // Return a copy of the notifications slice to avoid modifying the original slice outside. 
+ notificationsCopy := make([]Notification, len(n.notifications)) + copy(notificationsCopy, n.notifications) + return notificationsCopy +} + +// Sub allows a client to subscribe to live notifications. +// It returns a channel where the subscriber will receive notifications and a function to unsubscribe. +// Each subscriber has its own goroutine to handle notifications and prevent blocking. +func (n *Notifications) Sub() (<-chan Notification, func()) { + ch := make(chan Notification, 10) // Buffered channel to prevent blocking. + + n.mu.Lock() + // Add the new subscriber to the list. + n.subscribers[ch] = struct{}{} + n.subscriberGauge.Set(float64(len(n.subscribers))) + + // Send all current notifications to the new subscriber. + for _, notification := range n.notifications { + ch <- notification + } + n.mu.Unlock() + + // Unsubscribe function to remove the channel from subscribers. + unsubscribe := func() { + n.mu.Lock() + defer n.mu.Unlock() + + // Close the channel and remove it from the subscribers map. + close(ch) + delete(n.subscribers, ch) + n.subscriberGauge.Set(float64(len(n.subscribers))) + } + + return ch, unsubscribe +} diff --git a/web/api/notifications_test.go b/web/api/notifications_test.go new file mode 100644 index 0000000000..7aa5961638 --- /dev/null +++ b/web/api/notifications_test.go @@ -0,0 +1,192 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package api + +import ( + "sync" + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +// TestNotificationLifecycle tests adding, modifying, and deleting notifications. +func TestNotificationLifecycle(t *testing.T) { + notifs := NewNotifications(nil) + + // Add a notification. + notifs.AddNotification("Test Notification 1") + + // Check if the notification was added. + notifications := notifs.Get() + require.Len(t, notifications, 1, "Expected 1 notification after addition.") + require.Equal(t, "Test Notification 1", notifications[0].Text, "Notification text mismatch.") + require.True(t, notifications[0].Active, "Expected notification to be active.") + + // Modify the notification. + notifs.AddNotification("Test Notification 1") + notifications = notifs.Get() + require.Len(t, notifications, 1, "Expected 1 notification after modification.") + + // Delete the notification. + notifs.DeleteNotification("Test Notification 1") + notifications = notifs.Get() + require.Empty(t, notifications, "Expected no notifications after deletion.") +} + +// TestSubscriberReceivesNotifications tests that a subscriber receives notifications, including modifications and deletions. +func TestSubscriberReceivesNotifications(t *testing.T) { + notifs := NewNotifications(nil) + + // Subscribe to notifications. + sub, unsubscribe := notifs.Sub() + + var wg sync.WaitGroup + wg.Add(1) + + receivedNotifications := make([]Notification, 0) + + // Goroutine to listen for notifications. + go func() { + defer wg.Done() + for notification := range sub { + receivedNotifications = append(receivedNotifications, notification) + } + }() + + // Add notifications. + notifs.AddNotification("Test Notification 1") + notifs.AddNotification("Test Notification 2") + + // Modify a notification. + notifs.AddNotification("Test Notification 1") + + // Delete a notification. + notifs.DeleteNotification("Test Notification 2") + + // Wait for notifications to propagate. 
+ time.Sleep(100 * time.Millisecond) + + unsubscribe() + wg.Wait() // Wait for the subscriber goroutine to finish. + + // Verify that we received the expected number of notifications. + require.Len(t, receivedNotifications, 4, "Expected 4 notifications (2 active, 1 modified, 1 deleted).") + + // Check the content and state of received notifications. + expected := []struct { + Text string + Active bool + }{ + {"Test Notification 1", true}, + {"Test Notification 2", true}, + {"Test Notification 1", true}, + {"Test Notification 2", false}, + } + + for i, n := range receivedNotifications { + require.Equal(t, expected[i].Text, n.Text, "Notification text mismatch at index %d.", i) + require.Equal(t, expected[i].Active, n.Active, "Notification active state mismatch at index %d.", i) + } +} + +// TestMultipleSubscribers tests that multiple subscribers receive notifications independently. +func TestMultipleSubscribers(t *testing.T) { + notifs := NewNotifications(nil) + + // Subscribe two subscribers to notifications. + sub1, unsubscribe1 := notifs.Sub() + + sub2, unsubscribe2 := notifs.Sub() + + var wg sync.WaitGroup + wg.Add(2) + + receivedSub1 := make([]Notification, 0) + receivedSub2 := make([]Notification, 0) + + // Goroutine for subscriber 1. + go func() { + defer wg.Done() + for notification := range sub1 { + receivedSub1 = append(receivedSub1, notification) + } + }() + + // Goroutine for subscriber 2. + go func() { + defer wg.Done() + for notification := range sub2 { + receivedSub2 = append(receivedSub2, notification) + } + }() + + // Add and delete notifications. + notifs.AddNotification("Test Notification 1") + notifs.DeleteNotification("Test Notification 1") + + // Wait for notifications to propagate. + time.Sleep(100 * time.Millisecond) + + // Unsubscribe both. + unsubscribe1() + unsubscribe2() + + wg.Wait() + + // Both subscribers should have received the same 2 notifications. 
+ require.Len(t, receivedSub1, 2, "Expected 2 notifications for subscriber 1.") + require.Len(t, receivedSub2, 2, "Expected 2 notifications for subscriber 2.") + + // Verify that both subscribers received the same notifications. + for i := 0; i < 2; i++ { + require.Equal(t, receivedSub1[i], receivedSub2[i], "Subscriber notification mismatch at index %d.", i) + } +} + +// TestUnsubscribe tests that unsubscribing prevents further notifications from being received. +func TestUnsubscribe(t *testing.T) { + notifs := NewNotifications(nil) + + // Subscribe to notifications. + sub, unsubscribe := notifs.Sub() + + var wg sync.WaitGroup + wg.Add(1) + + receivedNotifications := make([]Notification, 0) + + // Goroutine to listen for notifications. + go func() { + defer wg.Done() + for notification := range sub { + receivedNotifications = append(receivedNotifications, notification) + } + }() + + // Add a notification and then unsubscribe. + notifs.AddNotification("Test Notification 1") + time.Sleep(100 * time.Millisecond) // Allow time for notification delivery. + unsubscribe() // Unsubscribe. + + // Add another notification after unsubscribing. + notifs.AddNotification("Test Notification 2") + + // Wait for the subscriber goroutine to finish. + wg.Wait() + + // Only the first notification should have been received. 
+ require.Len(t, receivedNotifications, 1, "Expected 1 notification before unsubscribe.") + require.Equal(t, "Test Notification 1", receivedNotifications[0].Text, "Unexpected notification text.") +} diff --git a/web/api/v1/api.go b/web/api/v1/api.go index 0ec8467faa..5eadbdbe75 100644 --- a/web/api/v1/api.go +++ b/web/api/v1/api.go @@ -15,6 +15,7 @@ package v1 import ( "context" + "encoding/json" "errors" "fmt" "math" @@ -54,6 +55,7 @@ import ( "github.com/prometheus/prometheus/util/annotations" "github.com/prometheus/prometheus/util/httputil" "github.com/prometheus/prometheus/util/stats" + "github.com/prometheus/prometheus/web/api" ) type status string @@ -202,16 +204,18 @@ type API struct { ready func(http.HandlerFunc) http.HandlerFunc globalURLOptions GlobalURLOptions - db TSDBAdminStats - dbDir string - enableAdmin bool - logger log.Logger - CORSOrigin *regexp.Regexp - buildInfo *PrometheusVersion - runtimeInfo func() (RuntimeInfo, error) - gatherer prometheus.Gatherer - isAgent bool - statsRenderer StatsRenderer + db TSDBAdminStats + dbDir string + enableAdmin bool + logger log.Logger + CORSOrigin *regexp.Regexp + buildInfo *PrometheusVersion + runtimeInfo func() (RuntimeInfo, error) + gatherer prometheus.Gatherer + isAgent bool + statsRenderer StatsRenderer + notificationsGetter func() []api.Notification + notificationsSub func() (<-chan api.Notification, func()) remoteWriteHandler http.Handler remoteReadHandler http.Handler @@ -245,6 +249,8 @@ func NewAPI( corsOrigin *regexp.Regexp, runtimeInfo func() (RuntimeInfo, error), buildInfo *PrometheusVersion, + notificationsGetter func() []api.Notification, + notificationsSub func() (<-chan api.Notification, func()), gatherer prometheus.Gatherer, registerer prometheus.Registerer, statsRenderer StatsRenderer, @@ -261,22 +267,24 @@ func NewAPI( targetRetriever: tr, alertmanagerRetriever: ar, - now: time.Now, - config: configFunc, - flagsMap: flagsMap, - ready: readyFunc, - globalURLOptions: globalURLOptions, - db: 
db, - dbDir: dbDir, - enableAdmin: enableAdmin, - rulesRetriever: rr, - logger: logger, - CORSOrigin: corsOrigin, - runtimeInfo: runtimeInfo, - buildInfo: buildInfo, - gatherer: gatherer, - isAgent: isAgent, - statsRenderer: DefaultStatsRenderer, + now: time.Now, + config: configFunc, + flagsMap: flagsMap, + ready: readyFunc, + globalURLOptions: globalURLOptions, + db: db, + dbDir: dbDir, + enableAdmin: enableAdmin, + rulesRetriever: rr, + logger: logger, + CORSOrigin: corsOrigin, + runtimeInfo: runtimeInfo, + buildInfo: buildInfo, + gatherer: gatherer, + isAgent: isAgent, + statsRenderer: DefaultStatsRenderer, + notificationsGetter: notificationsGetter, + notificationsSub: notificationsSub, remoteReadHandler: remote.NewReadHandler(logger, registerer, q, configFunc, remoteReadSampleLimit, remoteReadConcurrencyLimit, remoteReadMaxBytesInFrame), } @@ -390,6 +398,8 @@ func (api *API) Register(r *route.Router) { r.Get("/status/flags", wrap(api.serveFlags)) r.Get("/status/tsdb", wrapAgent(api.serveTSDBStatus)) r.Get("/status/walreplay", api.serveWALReplayStatus) + r.Get("/notifications", api.notifications) + r.Get("/notifications/live", api.notificationsSSE) r.Post("/read", api.ready(api.remoteRead)) r.Post("/write", api.ready(api.remoteWrite)) r.Post("/otlp/v1/metrics", api.ready(api.otlpWrite)) @@ -1668,6 +1678,49 @@ func (api *API) serveWALReplayStatus(w http.ResponseWriter, r *http.Request) { }, nil, "") } +func (api *API) notifications(w http.ResponseWriter, r *http.Request) { + httputil.SetCORS(w, api.CORSOrigin, r) + api.respond(w, r, api.notificationsGetter(), nil, "") +} + +func (api *API) notificationsSSE(w http.ResponseWriter, r *http.Request) { + httputil.SetCORS(w, api.CORSOrigin, r) + w.Header().Set("Content-Type", "text/event-stream") + w.Header().Set("Cache-Control", "no-cache") + w.Header().Set("Connection", "keep-alive") + + // Subscribe to notifications. 
+ notifications, unsubscribe := api.notificationsSub() + defer unsubscribe() + + // Set up a flusher to push the response to the client. + flusher, ok := w.(http.Flusher) + if !ok { + http.Error(w, "Streaming unsupported", http.StatusInternalServerError) + return + } + + for { + select { + case notification := <-notifications: + // Marshal the notification to JSON. + jsonData, err := json.Marshal(notification) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + continue + } + + // Write the event data in SSE format with JSON content. + fmt.Fprintf(w, "data: %s\n\n", jsonData) + + // Flush the response to ensure the data is sent immediately. + flusher.Flush() + case <-r.Context().Done(): + return + } + } +} + func (api *API) remoteRead(w http.ResponseWriter, r *http.Request) { // This is only really for tests - this will never be nil IRL. if api.remoteReadHandler != nil { diff --git a/web/api/v1/errors_test.go b/web/api/v1/errors_test.go index 7e1fc09d8a..db16b9fb3b 100644 --- a/web/api/v1/errors_test.go +++ b/web/api/v1/errors_test.go @@ -134,6 +134,8 @@ func createPrometheusAPI(t *testing.T, q storage.SampleAndChunkQueryable) *route regexp.MustCompile(".*"), func() (RuntimeInfo, error) { return RuntimeInfo{}, errors.New("not implemented") }, &PrometheusVersion{}, + nil, + nil, prometheus.DefaultGatherer, nil, nil, diff --git a/web/ui/mantine-ui/src/App.tsx b/web/ui/mantine-ui/src/App.tsx index aa5eb3714f..3e3466825c 100644 --- a/web/ui/mantine-ui/src/App.tsx +++ b/web/ui/mantine-ui/src/App.tsx @@ -64,6 +64,8 @@ import { useAppDispatch } from "./state/hooks"; import { updateSettings, useSettings } from "./state/settingsSlice"; import SettingsMenu from "./components/SettingsMenu"; import ReadinessWrapper from "./components/ReadinessWrapper"; +import NotificationsProvider from "./components/NotificationsProvider"; +import NotificationsIcon from "./components/NotificationsIcon"; import { QueryParamProvider } from "use-query-params"; import 
{ ReactRouter6Adapter } from "use-query-params/adapters/react-router-6"; import ServiceDiscoveryPage from "./pages/service-discovery/ServiceDiscoveryPage"; @@ -314,6 +316,7 @@ function App() { const navActionIcons = ( <> + - - - - - - - - Prometheus{agentMode && " Agent"} + + + + + + + + + Prometheus{agentMode && " Agent"} + + + + {navLinks} - - - {navLinks} + + + {navActionIcons} - - {navActionIcons} - + - - - + - - {navLinks} - - {navActionIcons} - - + + {navLinks} + + {navActionIcons} + + + diff --git a/web/ui/mantine-ui/src/api/api.ts b/web/ui/mantine-ui/src/api/api.ts index d7446d6896..f1dd2b8c0c 100644 --- a/web/ui/mantine-ui/src/api/api.ts +++ b/web/ui/mantine-ui/src/api/api.ts @@ -93,6 +93,7 @@ type QueryOptions = { path: string; params?: Record; enabled?: boolean; + refetchInterval?: false | number; recordResponseTime?: (time: number) => void; }; @@ -102,6 +103,7 @@ export const useAPIQuery = ({ params, enabled, recordResponseTime, + refetchInterval, }: QueryOptions) => { const { pathPrefix } = useSettings(); @@ -109,6 +111,7 @@ export const useAPIQuery = ({ queryKey: key !== undefined ? 
key : [path, params], retry: false, refetchOnWindowFocus: false, + refetchInterval: refetchInterval, gcTime: 0, enabled, queryFn: createQueryFn({ pathPrefix, path, params, recordResponseTime }), diff --git a/web/ui/mantine-ui/src/api/responseTypes/notifications.ts b/web/ui/mantine-ui/src/api/responseTypes/notifications.ts new file mode 100644 index 0000000000..d6ebf68d41 --- /dev/null +++ b/web/ui/mantine-ui/src/api/responseTypes/notifications.ts @@ -0,0 +1,8 @@ +export interface Notification { + text: string; + date: string; + active: boolean; + modified: boolean; +} + +export type NotificationsResult = Notification[]; diff --git a/web/ui/mantine-ui/src/components/NotificationsIcon.tsx b/web/ui/mantine-ui/src/components/NotificationsIcon.tsx new file mode 100644 index 0000000000..5ab28b037a --- /dev/null +++ b/web/ui/mantine-ui/src/components/NotificationsIcon.tsx @@ -0,0 +1,62 @@ +import { ActionIcon, Indicator, Popover, Card, Text, Stack, ScrollArea, Group } from "@mantine/core"; +import { IconBell, IconAlertTriangle, IconNetworkOff } from "@tabler/icons-react"; +import { useNotifications } from '../state/useNotifications'; +import { actionIconStyle } from "../styles"; +import { useSettings } from '../state/settingsSlice'; +import { formatTimestamp } from "../lib/formatTime"; + +const NotificationsIcon = () => { + const { notifications, isConnectionError } = useNotifications(); + const { useLocalTime } = useSettings(); + + return ( + (notifications.length === 0 && !isConnectionError) ? null : ( + + + + + + + + + + + Notifications + + { isConnectionError ? ( + + + + + Real-time notifications interrupted. + Please refresh the page or check your connection. + + + + ) : notifications.length === 0 ? 
( + No notifications + ) : (notifications.map((notification, index) => ( + + + + + {notification.text} + {formatTimestamp(new Date(notification.date).valueOf() / 1000, useLocalTime)} + + + + )))} + + + + + + ) + ); +}; + +export default NotificationsIcon; diff --git a/web/ui/mantine-ui/src/components/NotificationsProvider.tsx b/web/ui/mantine-ui/src/components/NotificationsProvider.tsx new file mode 100644 index 0000000000..73de54131e --- /dev/null +++ b/web/ui/mantine-ui/src/components/NotificationsProvider.tsx @@ -0,0 +1,61 @@ +import React, { useEffect, useState } from 'react'; +import { useSettings } from '../state/settingsSlice'; +import { NotificationsContext } from '../state/useNotifications'; +import { Notification, NotificationsResult } from "../api/responseTypes/notifications"; +import { useAPIQuery } from '../api/api'; + +export const NotificationsProvider: React.FC<{ children: React.ReactNode }> = ({ children }) => { + const { pathPrefix } = useSettings(); + const [notifications, setNotifications] = useState([]); + const [isConnectionError, setIsConnectionError] = useState(false); + const [shouldFetchFromAPI, setShouldFetchFromAPI] = useState(false); + + const { data, isError } = useAPIQuery({ + path: '/notifications', + enabled: shouldFetchFromAPI, + refetchInterval: 10000, + }); + + useEffect(() => { + if (data && data.data) { + setNotifications(data.data); + } + setIsConnectionError(isError); + }, [data, isError]); + + useEffect(() => { + const eventSource = new EventSource(`${pathPrefix}/api/v1/notifications/live`); + + eventSource.onmessage = (event) => { + const notification: Notification = JSON.parse(event.data); + + setNotifications((prev: Notification[]) => { + const updatedNotifications = [...prev.filter((n: Notification) => n.text !== notification.text)]; + + if (notification.active) { + updatedNotifications.push(notification); + } + + return updatedNotifications; + }); + }; + + eventSource.onerror = () => { + eventSource.close(); + 
setIsConnectionError(true); + setShouldFetchFromAPI(true); + }; + + return () => { + eventSource.close(); + }; + }, [pathPrefix]); + + return ( + + {children} + + ); +}; + +export default NotificationsProvider; diff --git a/web/ui/mantine-ui/src/state/useNotifications.ts b/web/ui/mantine-ui/src/state/useNotifications.ts new file mode 100644 index 0000000000..40a3f09206 --- /dev/null +++ b/web/ui/mantine-ui/src/state/useNotifications.ts @@ -0,0 +1,17 @@ +import { createContext, useContext } from 'react'; +import { Notification } from "../api/responseTypes/notifications"; + +export type NotificationsContextType = { + notifications: Notification[]; + isConnectionError: boolean; +}; + +const defaultContextValue: NotificationsContextType = { + notifications: [], + isConnectionError: false, +}; + +export const NotificationsContext = createContext(defaultContextValue); + +// Custom hook to access notifications context +export const useNotifications = () => useContext(NotificationsContext); diff --git a/web/web.go b/web/web.go index 6b0d9cd187..87e4164c58 100644 --- a/web/web.go +++ b/web/web.go @@ -59,6 +59,7 @@ import ( "github.com/prometheus/prometheus/template" "github.com/prometheus/prometheus/util/httputil" "github.com/prometheus/prometheus/util/netconnlimit" + "github.com/prometheus/prometheus/web/api" api_v1 "github.com/prometheus/prometheus/web/api/v1" "github.com/prometheus/prometheus/web/ui" ) @@ -266,6 +267,8 @@ type Options struct { RuleManager *rules.Manager Notifier *notifier.Manager Version *PrometheusVersion + NotificationsGetter func() []api.Notification + NotificationsSub func() (<-chan api.Notification, func()) Flags map[string]string ListenAddresses []string @@ -376,6 +379,8 @@ func New(logger log.Logger, o *Options) *Handler { h.options.CORSOrigin, h.runtimeInfo, h.versionInfo, + h.options.NotificationsGetter, + h.options.NotificationsSub, o.Gatherer, o.Registerer, nil, From f9bbad1148db0300977cd666a76a9d5609c884b6 Mon Sep 17 00:00:00 2001 From: 
Julien Date: Fri, 27 Sep 2024 13:51:50 +0200 Subject: [PATCH 065/137] Limit the number of SSE Subscribers to 16 by default Signed-off-by: Julien --- cmd/prometheus/main.go | 52 ++++++++++--------- docs/command-line/prometheus.md | 1 + web/api/notifications.go | 25 +++++---- web/api/notifications_test.go | 47 ++++++++++++++--- web/api/v1/api.go | 10 ++-- .../src/components/NotificationsProvider.tsx | 3 +- web/web.go | 2 +- 7 files changed, 94 insertions(+), 46 deletions(-) diff --git a/cmd/prometheus/main.go b/cmd/prometheus/main.go index dd068b86c5..f39eba3c31 100644 --- a/cmd/prometheus/main.go +++ b/cmd/prometheus/main.go @@ -135,24 +135,25 @@ func agentOnlyFlag(app *kingpin.Application, name, help string) *kingpin.FlagCla type flagConfig struct { configFile string - agentStoragePath string - serverStoragePath string - notifier notifier.Options - forGracePeriod model.Duration - outageTolerance model.Duration - resendDelay model.Duration - maxConcurrentEvals int64 - web web.Options - scrape scrape.Options - tsdb tsdbOptions - agent agentOptions - lookbackDelta model.Duration - webTimeout model.Duration - queryTimeout model.Duration - queryConcurrency int - queryMaxSamples int - RemoteFlushDeadline model.Duration - nameEscapingScheme string + agentStoragePath string + serverStoragePath string + notifier notifier.Options + forGracePeriod model.Duration + outageTolerance model.Duration + resendDelay model.Duration + maxConcurrentEvals int64 + web web.Options + scrape scrape.Options + tsdb tsdbOptions + agent agentOptions + lookbackDelta model.Duration + webTimeout model.Duration + queryTimeout model.Duration + queryConcurrency int + queryMaxSamples int + RemoteFlushDeadline model.Duration + nameEscapingScheme string + maxNotificationsSubscribers int enableAutoReload bool autoReloadInterval model.Duration @@ -274,17 +275,13 @@ func main() { ) } - notifs := api.NewNotifications(prometheus.DefaultRegisterer) - cfg := flagConfig{ notifier: notifier.Options{ Registerer: 
prometheus.DefaultRegisterer, }, web: web.Options{ - Registerer: prometheus.DefaultRegisterer, - Gatherer: prometheus.DefaultGatherer, - NotificationsSub: notifs.Sub, - NotificationsGetter: notifs.Get, + Registerer: prometheus.DefaultRegisterer, + Gatherer: prometheus.DefaultGatherer, }, promlogConfig: promlog.Config{}, } @@ -319,6 +316,9 @@ func main() { a.Flag("web.max-connections", "Maximum number of simultaneous connections across all listeners."). Default("512").IntVar(&cfg.web.MaxConnections) + a.Flag("web.max-notifications-subscribers", "Limits the maximum number of subscribers that can concurrently receive live notifications. If the limit is reached, new subscription requests will be denied until existing connections close."). + Default("16").IntVar(&cfg.maxNotificationsSubscribers) + a.Flag("web.external-url", "The URL under which Prometheus is externally reachable (for example, if Prometheus is served via a reverse proxy). Used for generating relative and absolute links back to Prometheus itself. If the URL has a path portion, it will be used to prefix all HTTP endpoints served by Prometheus. If omitted, relevant URL components will be derived automatically."). PlaceHolder("").StringVar(&cfg.prometheusURL) @@ -500,6 +500,10 @@ func main() { logger := promlog.New(&cfg.promlogConfig) + notifs := api.NewNotifications(cfg.maxNotificationsSubscribers, prometheus.DefaultRegisterer) + cfg.web.NotificationsSub = notifs.Sub + cfg.web.NotificationsGetter = notifs.Get + if err := cfg.setFeatureListOptions(logger); err != nil { fmt.Fprintln(os.Stderr, fmt.Errorf("Error parsing feature list: %w", err)) os.Exit(1) diff --git a/docs/command-line/prometheus.md b/docs/command-line/prometheus.md index 7737b50210..eacb45ad07 100644 --- a/docs/command-line/prometheus.md +++ b/docs/command-line/prometheus.md @@ -21,6 +21,7 @@ The Prometheus monitoring server | --web.config.file | [EXPERIMENTAL] Path to configuration file that can enable TLS or authentication. 
| | | --web.read-timeout | Maximum duration before timing out read of the request, and closing idle connections. | `5m` | | --web.max-connections | Maximum number of simultaneous connections across all listeners. | `512` | +| --web.max-notifications-subscribers | Limits the maximum number of subscribers that can concurrently receive live notifications. If the limit is reached, new subscription requests will be denied until existing connections close. | `16` | | --web.external-url | The URL under which Prometheus is externally reachable (for example, if Prometheus is served via a reverse proxy). Used for generating relative and absolute links back to Prometheus itself. If the URL has a path portion, it will be used to prefix all HTTP endpoints served by Prometheus. If omitted, relevant URL components will be derived automatically. | | | --web.route-prefix | Prefix for the internal routes of web endpoints. Defaults to path of --web.external-url. | | | --web.user-assets | Path to static asset directory, available at /user. | | diff --git a/web/api/notifications.go b/web/api/notifications.go index 47f29f6ebe..976f0b0768 100644 --- a/web/api/notifications.go +++ b/web/api/notifications.go @@ -34,9 +34,10 @@ type Notification struct { // Notifications stores a list of Notification objects. // It also manages live subscribers that receive notifications via channels. type Notifications struct { - mu sync.Mutex - notifications []Notification - subscribers map[chan Notification]struct{} // Active subscribers. + mu sync.Mutex + notifications []Notification + subscribers map[chan Notification]struct{} // Active subscribers. + maxSubscribers int subscriberGauge prometheus.Gauge notificationsSent prometheus.Counter @@ -44,9 +45,10 @@ type Notifications struct { } // NewNotifications creates a new Notifications instance. 
-func NewNotifications(reg prometheus.Registerer) *Notifications { +func NewNotifications(maxSubscribers int, reg prometheus.Registerer) *Notifications { n := &Notifications{ - subscribers: make(map[chan Notification]struct{}), + subscribers: make(map[chan Notification]struct{}), + maxSubscribers: maxSubscribers, subscriberGauge: prometheus.NewGauge(prometheus.GaugeOpts{ Namespace: "prometheus", Subsystem: "api", @@ -147,10 +149,16 @@ func (n *Notifications) Get() []Notification { // Sub allows a client to subscribe to live notifications. // It returns a channel where the subscriber will receive notifications and a function to unsubscribe. // Each subscriber has its own goroutine to handle notifications and prevent blocking. -func (n *Notifications) Sub() (<-chan Notification, func()) { +func (n *Notifications) Sub() (<-chan Notification, func(), bool) { + n.mu.Lock() + defer n.mu.Unlock() + + if len(n.subscribers) >= n.maxSubscribers { + return nil, nil, false + } + ch := make(chan Notification, 10) // Buffered channel to prevent blocking. - n.mu.Lock() // Add the new subscriber to the list. n.subscribers[ch] = struct{}{} n.subscriberGauge.Set(float64(len(n.subscribers))) @@ -159,7 +167,6 @@ func (n *Notifications) Sub() (<-chan Notification, func()) { for _, notification := range n.notifications { ch <- notification } - n.mu.Unlock() // Unsubscribe function to remove the channel from subscribers. unsubscribe := func() { @@ -172,5 +179,5 @@ func (n *Notifications) Sub() (<-chan Notification, func()) { n.subscriberGauge.Set(float64(len(n.subscribers))) } - return ch, unsubscribe + return ch, unsubscribe, true } diff --git a/web/api/notifications_test.go b/web/api/notifications_test.go index 7aa5961638..437ff1ec4b 100644 --- a/web/api/notifications_test.go +++ b/web/api/notifications_test.go @@ -23,7 +23,7 @@ import ( // TestNotificationLifecycle tests adding, modifying, and deleting notifications. 
func TestNotificationLifecycle(t *testing.T) { - notifs := NewNotifications(nil) + notifs := NewNotifications(10, nil) // Add a notification. notifs.AddNotification("Test Notification 1") @@ -47,10 +47,11 @@ func TestNotificationLifecycle(t *testing.T) { // TestSubscriberReceivesNotifications tests that a subscriber receives notifications, including modifications and deletions. func TestSubscriberReceivesNotifications(t *testing.T) { - notifs := NewNotifications(nil) + notifs := NewNotifications(10, nil) // Subscribe to notifications. - sub, unsubscribe := notifs.Sub() + sub, unsubscribe, ok := notifs.Sub() + require.True(t, ok) var wg sync.WaitGroup wg.Add(1) @@ -103,12 +104,14 @@ func TestSubscriberReceivesNotifications(t *testing.T) { // TestMultipleSubscribers tests that multiple subscribers receive notifications independently. func TestMultipleSubscribers(t *testing.T) { - notifs := NewNotifications(nil) + notifs := NewNotifications(10, nil) // Subscribe two subscribers to notifications. - sub1, unsubscribe1 := notifs.Sub() + sub1, unsubscribe1, ok1 := notifs.Sub() + require.True(t, ok1) - sub2, unsubscribe2 := notifs.Sub() + sub2, unsubscribe2, ok2 := notifs.Sub() + require.True(t, ok2) var wg sync.WaitGroup wg.Add(2) @@ -157,10 +160,11 @@ func TestMultipleSubscribers(t *testing.T) { // TestUnsubscribe tests that unsubscribing prevents further notifications from being received. func TestUnsubscribe(t *testing.T) { - notifs := NewNotifications(nil) + notifs := NewNotifications(10, nil) // Subscribe to notifications. 
- sub, unsubscribe := notifs.Sub() + sub, unsubscribe, ok := notifs.Sub() + require.True(t, ok) var wg sync.WaitGroup wg.Add(1) @@ -190,3 +194,30 @@ func TestUnsubscribe(t *testing.T) { require.Len(t, receivedNotifications, 1, "Expected 1 notification before unsubscribe.") require.Equal(t, "Test Notification 1", receivedNotifications[0].Text, "Unexpected notification text.") } + +// TestMaxSubscribers tests that exceeding the max subscribers limit prevents additional subscriptions. +func TestMaxSubscribers(t *testing.T) { + maxSubscribers := 2 + notifs := NewNotifications(maxSubscribers, nil) + + // Subscribe the maximum number of subscribers. + _, unsubscribe1, ok1 := notifs.Sub() + require.True(t, ok1, "Expected first subscription to succeed.") + + _, unsubscribe2, ok2 := notifs.Sub() + require.True(t, ok2, "Expected second subscription to succeed.") + + // Try to subscribe more than the max allowed. + _, _, ok3 := notifs.Sub() + require.False(t, ok3, "Expected third subscription to fail due to max subscriber limit.") + + // Unsubscribe one subscriber and try again. + unsubscribe1() + + _, unsubscribe4, ok4 := notifs.Sub() + require.True(t, ok4, "Expected subscription to succeed after unsubscribing a subscriber.") + + // Clean up the subscriptions. 
+ unsubscribe2() + unsubscribe4() +} diff --git a/web/api/v1/api.go b/web/api/v1/api.go index 5eadbdbe75..4589e14e02 100644 --- a/web/api/v1/api.go +++ b/web/api/v1/api.go @@ -215,7 +215,7 @@ type API struct { isAgent bool statsRenderer StatsRenderer notificationsGetter func() []api.Notification - notificationsSub func() (<-chan api.Notification, func()) + notificationsSub func() (<-chan api.Notification, func(), bool) remoteWriteHandler http.Handler remoteReadHandler http.Handler @@ -250,7 +250,7 @@ func NewAPI( runtimeInfo func() (RuntimeInfo, error), buildInfo *PrometheusVersion, notificationsGetter func() []api.Notification, - notificationsSub func() (<-chan api.Notification, func()), + notificationsSub func() (<-chan api.Notification, func(), bool), gatherer prometheus.Gatherer, registerer prometheus.Registerer, statsRenderer StatsRenderer, @@ -1690,7 +1690,11 @@ func (api *API) notificationsSSE(w http.ResponseWriter, r *http.Request) { w.Header().Set("Connection", "keep-alive") // Subscribe to notifications. - notifications, unsubscribe := api.notificationsSub() + notifications, unsubscribe, ok := api.notificationsSub() + if !ok { + w.WriteHeader(http.StatusNoContent) + return + } defer unsubscribe() // Set up a flusher to push the response to the client. diff --git a/web/ui/mantine-ui/src/components/NotificationsProvider.tsx b/web/ui/mantine-ui/src/components/NotificationsProvider.tsx index 73de54131e..44510061ed 100644 --- a/web/ui/mantine-ui/src/components/NotificationsProvider.tsx +++ b/web/ui/mantine-ui/src/components/NotificationsProvider.tsx @@ -42,7 +42,8 @@ export const NotificationsProvider: React.FC<{ children: React.ReactNode }> = ({ eventSource.onerror = () => { eventSource.close(); - setIsConnectionError(true); + // We do not call setIsConnectionError(true), we only set it to true if + // the fallback API does not work either. 
setShouldFetchFromAPI(true); }; diff --git a/web/web.go b/web/web.go index 87e4164c58..724ca91051 100644 --- a/web/web.go +++ b/web/web.go @@ -268,7 +268,7 @@ type Options struct { Notifier *notifier.Manager Version *PrometheusVersion NotificationsGetter func() []api.Notification - NotificationsSub func() (<-chan api.Notification, func()) + NotificationsSub func() (<-chan api.Notification, func(), bool) Flags map[string]string ListenAddresses []string From e34563bfe0ac78d81d3147aed9e03789945e1c74 Mon Sep 17 00:00:00 2001 From: Julien Date: Fri, 27 Sep 2024 15:58:41 +0200 Subject: [PATCH 066/137] Retry SSE connection unless max clients have been reached. This switches from the prehistoric EventSource API to the more modern fetch-event-source package. That packages gives us full control over the retries. It also gives us the opportunity to close the event source when the browser tab is hidden, saving resources. Signed-off-by: Julien --- web/api/v1/api.go | 4 ++ web/ui/mantine-ui/package.json | 1 + .../src/components/NotificationsProvider.tsx | 57 ++++++++++++------- web/ui/package-lock.json | 6 ++ 4 files changed, 48 insertions(+), 20 deletions(-) diff --git a/web/api/v1/api.go b/web/api/v1/api.go index 4589e14e02..d3cc7d718d 100644 --- a/web/api/v1/api.go +++ b/web/api/v1/api.go @@ -1704,6 +1704,10 @@ func (api *API) notificationsSSE(w http.ResponseWriter, r *http.Request) { return } + // Flush the response to ensure the headers are immediately and eventSource + // onopen is triggered client-side. 
+ flusher.Flush() + for { select { case notification := <-notifications: diff --git a/web/ui/mantine-ui/package.json b/web/ui/mantine-ui/package.json index ec8ef89026..aae8ba99b1 100644 --- a/web/ui/mantine-ui/package.json +++ b/web/ui/mantine-ui/package.json @@ -25,6 +25,7 @@ "@mantine/dates": "^7.11.2", "@mantine/hooks": "^7.11.2", "@mantine/notifications": "^7.11.2", + "@microsoft/fetch-event-source": "^2.0.1", "@nexucis/fuzzy": "^0.5.1", "@nexucis/kvsearch": "^0.9.1", "@prometheus-io/codemirror-promql": "0.300.0-beta.0", diff --git a/web/ui/mantine-ui/src/components/NotificationsProvider.tsx b/web/ui/mantine-ui/src/components/NotificationsProvider.tsx index 44510061ed..a331e524b0 100644 --- a/web/ui/mantine-ui/src/components/NotificationsProvider.tsx +++ b/web/ui/mantine-ui/src/components/NotificationsProvider.tsx @@ -3,6 +3,7 @@ import { useSettings } from '../state/settingsSlice'; import { NotificationsContext } from '../state/useNotifications'; import { Notification, NotificationsResult } from "../api/responseTypes/notifications"; import { useAPIQuery } from '../api/api'; +import { fetchEventSource } from '@microsoft/fetch-event-source'; export const NotificationsProvider: React.FC<{ children: React.ReactNode }> = ({ children }) => { const { pathPrefix } = useSettings(); @@ -24,31 +25,47 @@ export const NotificationsProvider: React.FC<{ children: React.ReactNode }> = ({ }, [data, isError]); useEffect(() => { - const eventSource = new EventSource(`${pathPrefix}/api/v1/notifications/live`); - - eventSource.onmessage = (event) => { - const notification: Notification = JSON.parse(event.data); - - setNotifications((prev: Notification[]) => { - const updatedNotifications = [...prev.filter((n: Notification) => n.text !== notification.text)]; - - if (notification.active) { - updatedNotifications.push(notification); + const controller = new AbortController(); + fetchEventSource(`${pathPrefix}/api/v1/notifications/live`, { + signal: controller.signal, + async 
onopen(response) { + if (response.ok) { + if (response.status === 200) { + setNotifications([]); + setIsConnectionError(false); + } else if (response.status === 204) { + controller.abort(); + setShouldFetchFromAPI(true); + } + } else { + setIsConnectionError(true); + throw new Error(`Unexpected response: ${response.status} ${response.statusText}`); } + }, + onmessage(event) { + const notification: Notification = JSON.parse(event.data); - return updatedNotifications; - }); - }; + setNotifications((prev: Notification[]) => { + const updatedNotifications = [...prev.filter((n: Notification) => n.text !== notification.text)]; - eventSource.onerror = () => { - eventSource.close(); - // We do not call setIsConnectionError(true), we only set it to true if - // the fallback API does not work either. - setShouldFetchFromAPI(true); - }; + if (notification.active) { + updatedNotifications.push(notification); + } + + return updatedNotifications; + }); + }, + onclose() { + throw new Error("Server closed the connection"); + }, + onerror() { + setIsConnectionError(true); + return 5000; + }, + }); return () => { - eventSource.close(); + controller.abort(); }; }, [pathPrefix]); diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index 2dc1fcdfe8..49a9074806 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -39,6 +39,7 @@ "@mantine/dates": "^7.11.2", "@mantine/hooks": "^7.11.2", "@mantine/notifications": "^7.11.2", + "@microsoft/fetch-event-source": "^2.0.1", "@nexucis/fuzzy": "^0.5.1", "@nexucis/kvsearch": "^0.9.1", "@prometheus-io/codemirror-promql": "0.300.0-beta.0", @@ -2255,6 +2256,11 @@ "react": "^18.2.0" } }, + "node_modules/@microsoft/fetch-event-source": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/@microsoft/fetch-event-source/-/fetch-event-source-2.0.1.tgz", + "integrity": "sha512-W6CLUJ2eBMw3Rec70qrsEW0jOm/3twwJv21mrmj2yORiaVmVYGS4sSS5yUwvQc1ZlDLYGPnClVWmUUMagKNsfA==" + }, "node_modules/@nexucis/fuzzy": { "version": 
"0.5.1", "resolved": "https://registry.npmjs.org/@nexucis/fuzzy/-/fuzzy-0.5.1.tgz", From 105ab2e95afa786c7ad21be614a31ff45c143cbb Mon Sep 17 00:00:00 2001 From: Ayoub Mrini Date: Fri, 27 Sep 2024 18:13:51 +0200 Subject: [PATCH 067/137] fix(test): adjust defer invocations (#14996) Signed-off-by: machine424 --- storage/remote/read_test.go | 4 +++- tsdb/agent/db_test.go | 4 +++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/storage/remote/read_test.go b/storage/remote/read_test.go index d63cefc3fe..b78a8c6215 100644 --- a/storage/remote/read_test.go +++ b/storage/remote/read_test.go @@ -475,7 +475,9 @@ func TestSampleAndChunkQueryableClient(t *testing.T) { ) q, err := c.Querier(tc.mint, tc.maxt) require.NoError(t, err) - defer require.NoError(t, q.Close()) + defer func() { + require.NoError(t, q.Close()) + }() ss := q.Select(context.Background(), true, nil, tc.matchers...) require.NoError(t, err) diff --git a/tsdb/agent/db_test.go b/tsdb/agent/db_test.go index b31041b1b9..f940e19158 100644 --- a/tsdb/agent/db_test.go +++ b/tsdb/agent/db_test.go @@ -640,7 +640,9 @@ func Test_ExistingWAL_NextRef(t *testing.T) { // Create a new storage and see what nextRef is initialized to. 
db, err = Open(log.NewNopLogger(), nil, rs, dbDir, DefaultOptions()) require.NoError(t, err) - defer require.NoError(t, db.Close()) + defer func() { + require.NoError(t, db.Close()) + }() require.Equal(t, uint64(seriesCount+histogramCount), db.nextRef.Load(), "nextRef should be equal to the number of series written across the entire WAL") } From fd62dbc2918deea0ceae94758baf7a095b52dd5b Mon Sep 17 00:00:00 2001 From: Fiona Liao Date: Fri, 20 Sep 2024 15:47:06 +0100 Subject: [PATCH 068/137] Update chunk format docs with native histograms and OOO Signed-off-by: Fiona Liao --- tsdb/docs/format/chunks.md | 54 ++++++++++++++++++++++++++++++--- tsdb/docs/format/head_chunks.md | 4 +++ 2 files changed, 53 insertions(+), 5 deletions(-) diff --git a/tsdb/docs/format/chunks.md b/tsdb/docs/format/chunks.md index 8318e0a540..ae2548594d 100644 --- a/tsdb/docs/format/chunks.md +++ b/tsdb/docs/format/chunks.md @@ -36,7 +36,7 @@ in-file offset (lower 4 bytes) and segment sequence number (upper 4 bytes). Notes: * `` has 1 to 10 bytes. -* `encoding`: Currently either `XOR` or `histogram`. +* `encoding`: Currently either `XOR`, `histogram`, or `float histogram`. * `data`: See below for each encoding. ## XOR chunk data @@ -92,7 +92,7 @@ Notes: ├──────────────────────────┤ │ ... │ ├──────────────────────────┤ -│ Sample_n │ +│ sample_n │ └──────────────────────────┘ ``` @@ -107,9 +107,9 @@ Notes: #### Sample 1 data: ``` -┌────────────────────────┬───────────────────────────┬────────────────────────────────┬──────────────────────┬─────────────────────────────────┬─────┬─────────────────────────────────┬─────────────────────────────────┬─────┬─────────────────────────────────┐ -│ ts_delta │ count_delta │ zero_count_delta │ sum_xor │ pos_bucket_0_delta │ ... │ pos_bucket_n_delta │ neg_bucket_0_delta │ ... 
│ neg_bucket_n_delta │ -└────────────────────────┴───────────────────────────┴────────────────────────────────┴──────────────────────┴─────────────────────────────────┴─────┴─────────────────────────────────┴─────────────────────────────────┴─────┴─────────────────────────────────┘ +┌───────────────────────┬──────────────────────────┬───────────────────────────────┬──────────────────────┬─────────────────────────────────┬─────┬─────────────────────────────────┬─────────────────────────────────┬─────┬─────────────────────────────────┐ +│ ts_delta │ count_delta │ zero_count_delta │ sum_xor │ pos_bucket_0_delta │ ... │ pos_bucket_n_delta │ neg_bucket_0_delta │ ... │ neg_bucket_n_delta │ +└───────────────────────┴──────────────────────────┴───────────────────────────────┴──────────────────────┴─────────────────────────────────┴─────┴─────────────────────────────────┴─────────────────────────────────┴─────┴─────────────────────────────────┘ ``` #### Sample 2 data and following: @@ -142,3 +142,47 @@ Notes: * Note that buckets are inherently deltas between the current bucket and the previous bucket. Only `bucket_0` is an absolute count. * The chunk can have as few as one sample, i.e. sample 1 and following are optional. * Similarly, there could be down to zero spans and down to zero buckets. + +## Float histogram chunk data + +Float histograms have the same layout as histograms apart from the encoding of samples. + +### Samples data: + +``` +┌──────────────────────────┐ +│ sample_0 │ +├──────────────────────────┤ +│ sample_1 │ +├──────────────────────────┤ +│ sample_2 │ +├──────────────────────────┤ +│ ... │ +├──────────────────────────┤ +│ sample_n │ +└──────────────────────────┘ +``` + +#### Sample 0 data: + +``` +┌─────────────────┬─────────────────┬──────────────────────┬───────────────┬────────────────────────┬─────┬────────────────────────┬────────────────────────┬─────┬────────────────────────┐ +│ ts │ count │ zero_count │ sum │ pos_bucket_0 │ ... 
│ pos_bucket_n │ neg_bucket_0 │ ... │ neg_bucket_n │ +└─────────────────┴─────────────────┴──────────────────────┴───────────────┴────────────────────────┴─────┴────────────────────────┴────────────────────────┴─────┴────────────────────────┘ +``` + +#### Sample 1 data: + +``` +┌───────────────────────┬────────────────────────┬─────────────────────────────┬──────────────────────┬───────────────────────────────┬─────┬───────────────────────────────┬───────────────────────────────┬─────┬───────────────────────────────┐ +│ ts_delta │ count_xor │ zero_count_xor │ sum_xor │ pos_bucket_0_xor │ ... │ pos_bucket_n_xor │ neg_bucket_0_xor │ ... │ neg_bucket_n_xor │ +└───────────────────────┴────────────────────────┴─────────────────────────────┴──────────────────────┴───────────────────────────────┴─────┴───────────────────────────────┴───────────────────────────────┴─────┴───────────────────────────────┘ +``` + +#### Sample 2 data and following: + +``` +┌─────────────────────┬────────────────────────┬─────────────────────────────┬──────────────────────┬───────────────────────────────┬─────┬───────────────────────────────┬───────────────────────────────┬─────┬───────────────────────────────┐ +│ ts_dod │ count_xor │ zero_count_xor │ sum_xor │ pos_bucket_0_xor │ ... │ pos_bucket_n_xor │ neg_bucket_0_xor │ ... │ neg_bucket_n_xor │ +└─────────────────────┴────────────────────────┴─────────────────────────────┴──────────────────────┴───────────────────────────────┴─────┴───────────────────────────────┴───────────────────────────────┴─────┴───────────────────────────────┘ +``` diff --git a/tsdb/docs/format/head_chunks.md b/tsdb/docs/format/head_chunks.md index 5737f42058..7040dcf41a 100644 --- a/tsdb/docs/format/head_chunks.md +++ b/tsdb/docs/format/head_chunks.md @@ -37,3 +37,7 @@ is used while replaying the chunks. 
| series ref <8 byte> | mint <8 byte, uint64> | maxt <8 byte, uint64> | encoding <1 byte> | len | data │ CRC32 <4 byte> │ └─────────────────────┴───────────────────────┴───────────────────────┴───────────────────┴───────────────┴──────────────┴────────────────┘ ``` + +## OOO encoding + +Head chunks use the highest bit of the `encoding` field to indicate whether it is out-of-order (1) or not (0). This bit is not set for chunks in the on-disk blocks. From 97f32191576f768393e08cef7448f8747344a77a Mon Sep 17 00:00:00 2001 From: machine424 Date: Fri, 27 Sep 2024 13:31:44 +0200 Subject: [PATCH 069/137] test(discovery): add a Configs test showing that the custom unmarshalling/marshalling is broken. This went under the radar because the utils are never called directly. We usually marshall/unmarshal Configs as embeded in a struct using UnmarshalYAMLWithInlineConfigs/MarshalYAMLWithInlineConfigs which bypasses Configs' custom UnmarshalYAML/MarshalYAML Signed-off-by: machine424 --- discovery/discovery_test.go | 36 ++++++++++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) create mode 100644 discovery/discovery_test.go diff --git a/discovery/discovery_test.go b/discovery/discovery_test.go new file mode 100644 index 0000000000..af327195f2 --- /dev/null +++ b/discovery/discovery_test.go @@ -0,0 +1,36 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package discovery + +import ( + "testing" + + "github.com/stretchr/testify/require" + "gopkg.in/yaml.v2" +) + +func TestConfigsCustomUnMarshalMarshal(t *testing.T) { + input := `static_configs: +- targets: + - foo:1234 + - bar:4321 +` + cfg := &Configs{} + err := yaml.UnmarshalStrict([]byte(input), cfg) + require.NoError(t, err) + + output, err := yaml.Marshal(cfg) + require.NoError(t, err) + require.Equal(t, input, string(output)) +} From b5569c40708429a0feabdba24596d80b8617fd81 Mon Sep 17 00:00:00 2001 From: machine424 Date: Fri, 27 Sep 2024 13:40:26 +0200 Subject: [PATCH 070/137] fix(discovery): adjust how type is retrieved in Configs' MarshalYAML/UnmarshalYAML Signed-off-by: machine424 --- discovery/discovery.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/discovery/discovery.go b/discovery/discovery.go index a91faf6c86..9a83df409b 100644 --- a/discovery/discovery.go +++ b/discovery/discovery.go @@ -109,7 +109,7 @@ func (c *Configs) SetDirectory(dir string) { // UnmarshalYAML implements yaml.Unmarshaler. func (c *Configs) UnmarshalYAML(unmarshal func(interface{}) error) error { - cfgTyp := getConfigType(configsType) + cfgTyp := reflect.StructOf(configFields) cfgPtr := reflect.New(cfgTyp) cfgVal := cfgPtr.Elem() @@ -124,7 +124,7 @@ func (c *Configs) UnmarshalYAML(unmarshal func(interface{}) error) error { // MarshalYAML implements yaml.Marshaler. func (c Configs) MarshalYAML() (interface{}, error) { - cfgTyp := getConfigType(configsType) + cfgTyp := reflect.StructOf(configFields) cfgPtr := reflect.New(cfgTyp) cfgVal := cfgPtr.Elem() From b826c43987639e481e8453c6ad99b7e258c63b53 Mon Sep 17 00:00:00 2001 From: Julius Volz Date: Mon, 30 Sep 2024 14:22:40 +0200 Subject: [PATCH 071/137] Calculate path prefix directly in initial settings Redux value (#14981) Without this, the page that is shown first renders once with an empty path prefix value, since the settings update takes a render cycle to complete. 
However, we only fetch certain data from the API exactly once for a given page, and not for every re-render with changed path prefix value (and we also wouldn't want to fetch it from the wrong location initially). This duplicates the served endpoint list once more, but exporting them from App.tsx would also have been dirty (hot reload only works when a file only exports one component and nothing else, thus there'd be a linter warning). Signed-off-by: Julius Volz --- web/ui/mantine-ui/src/App.tsx | 32 ++------------------ web/ui/mantine-ui/src/state/settingsSlice.ts | 30 ++++++++++++++++-- 2 files changed, 31 insertions(+), 31 deletions(-) diff --git a/web/ui/mantine-ui/src/App.tsx b/web/ui/mantine-ui/src/App.tsx index 3e3466825c..3bec30fa31 100644 --- a/web/ui/mantine-ui/src/App.tsx +++ b/web/ui/mantine-ui/src/App.tsx @@ -56,12 +56,11 @@ import TSDBStatusPage from "./pages/TSDBStatusPage"; import FlagsPage from "./pages/FlagsPage"; import ConfigPage from "./pages/ConfigPage"; import AgentPage from "./pages/AgentPage"; -import { Suspense, useEffect } from "react"; +import { Suspense } from "react"; import ErrorBoundary from "./components/ErrorBoundary"; import { ThemeSelector } from "./components/ThemeSelector"; import { Notifications } from "@mantine/notifications"; -import { useAppDispatch } from "./state/hooks"; -import { updateSettings, useSettings } from "./state/settingsSlice"; +import { useSettings } from "./state/settingsSlice"; import SettingsMenu from "./components/SettingsMenu"; import ReadinessWrapper from "./components/ReadinessWrapper"; import NotificationsProvider from "./components/NotificationsProvider"; @@ -172,37 +171,12 @@ const theme = createTheme({ }, }); -// This dynamically/generically determines the pathPrefix by stripping the first known -// endpoint suffix from the window location path. It works out of the box for both direct -// hosting and reverse proxy deployments with no additional configurations required. 
-const getPathPrefix = (path: string) => { - if (path.endsWith("/")) { - path = path.slice(0, -1); - } - - const pagePaths = [ - ...mainNavPages, - ...allStatusPages, - { path: "/agent" }, - ].map((p) => p.path); - - const pagePath = pagePaths.find((p) => path.endsWith(p)); - return path.slice(0, path.length - (pagePath || "").length); -}; - const navLinkXPadding = "md"; function App() { const [opened, { toggle }] = useDisclosure(); - const pathPrefix = getPathPrefix(window.location.pathname); - const dispatch = useAppDispatch(); - - useEffect(() => { - dispatch(updateSettings({ pathPrefix })); - }, [pathPrefix, dispatch]); - - const { agentMode, consolesLink } = useSettings(); + const { agentMode, consolesLink, pathPrefix } = useSettings(); const navLinks = ( <> diff --git a/web/ui/mantine-ui/src/state/settingsSlice.ts b/web/ui/mantine-ui/src/state/settingsSlice.ts index 1591c43882..ea744e0149 100644 --- a/web/ui/mantine-ui/src/state/settingsSlice.ts +++ b/web/ui/mantine-ui/src/state/settingsSlice.ts @@ -4,7 +4,7 @@ import { initializeFromLocalStorage } from "./initializeFromLocalStorage"; interface Settings { consolesLink: string | null; - lookbackDelta: string, + lookbackDelta: string; agentMode: boolean; ready: boolean; pathPrefix: string; @@ -30,6 +30,32 @@ export const localStorageKeyEnableSyntaxHighlighting = export const localStorageKeyEnableLinter = "settings.enableLinter"; export const localStorageKeyShowAnnotations = "settings.showAnnotations"; +// This dynamically/generically determines the pathPrefix by stripping the first known +// endpoint suffix from the window location path. It works out of the box for both direct +// hosting and reverse proxy deployments with no additional configurations required. 
+const getPathPrefix = (path: string) => { + if (path.endsWith("/")) { + path = path.slice(0, -1); + } + + const pagePaths = [ + "/query", + "/alerts", + "/targets", + "/rules", + "/service-discovery", + "/status", + "/tsdb-status", + "/flags", + "/config", + "/alertmanager-discovery", + "/agent", + ]; + + const pagePath = pagePaths.find((p) => path.endsWith(p)); + return path.slice(0, path.length - (pagePath || "").length); +}; + export const initialState: Settings = { consolesLink: GLOBAL_CONSOLES_LINK === "CONSOLES_LINK_PLACEHOLDER" || @@ -44,7 +70,7 @@ export const initialState: Settings = { GLOBAL_LOOKBACKDELTA === null ? "" : GLOBAL_LOOKBACKDELTA, - pathPrefix: "", + pathPrefix: getPathPrefix(window.location.pathname), useLocalTime: initializeFromLocalStorage( localStorageKeyUseLocalTime, false From 9bb7fab4ab8e31525984948defdee5d86c55e9d9 Mon Sep 17 00:00:00 2001 From: Levi Harrison Date: Mon, 30 Sep 2024 09:44:41 -0400 Subject: [PATCH 072/137] remove LeviHarrison as default maintainer (#15005) Signed-off-by: Levi Harrison --- MAINTAINERS.md | 1 - 1 file changed, 1 deletion(-) diff --git a/MAINTAINERS.md b/MAINTAINERS.md index 7f4153abc1..44c07f0633 100644 --- a/MAINTAINERS.md +++ b/MAINTAINERS.md @@ -2,7 +2,6 @@ General maintainers: * Bryan Boreham (bjboreham@gmail.com / @bboreham) -* Levi Harrison (levi@leviharrison.dev / @LeviHarrison) * Ayoub Mrini (ayoubmrini424@gmail.com / @machine424) * Julien Pivotto (roidelapluie@prometheus.io / @roidelapluie) From 7a90d73fa63e9ea5cab357951ffcf32c9bc8bfdf Mon Sep 17 00:00:00 2001 From: bas smit Date: Wed, 18 Sep 2024 10:38:47 +0200 Subject: [PATCH 073/137] sd k8s: test for sidecar container support in endpoints This test is expected to fail, the followup will add the feature Signed-off-by: bas smit --- discovery/kubernetes/endpoints_test.go | 164 +++++++++++++++++++++ discovery/kubernetes/endpointslice_test.go | 162 ++++++++++++++++++++ 2 files changed, 326 insertions(+) diff --git 
a/discovery/kubernetes/endpoints_test.go b/discovery/kubernetes/endpoints_test.go index 3ea98c5db9..c503448b21 100644 --- a/discovery/kubernetes/endpoints_test.go +++ b/discovery/kubernetes/endpoints_test.go @@ -1089,3 +1089,167 @@ func TestEndpointsDiscoveryUpdatePod(t *testing.T) { }, }.Run(t) } + +func TestEndpointsDiscoverySidecarContainer(t *testing.T) { + objs := []runtime.Object{ + &v1.Endpoints{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testsidecar", + Namespace: "default", + }, + Subsets: []v1.EndpointSubset{ + { + Addresses: []v1.EndpointAddress{ + { + IP: "4.3.2.1", + TargetRef: &v1.ObjectReference{ + Kind: "Pod", + Name: "testpod", + Namespace: "default", + }, + }, + }, + Ports: []v1.EndpointPort{ + { + Name: "testport", + Port: 9000, + Protocol: v1.ProtocolTCP, + }, + { + Name: "initport", + Port: 9111, + Protocol: v1.ProtocolTCP, + }, + }, + }, + }, + }, + &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testpod", + Namespace: "default", + UID: types.UID("deadbeef"), + }, + Spec: v1.PodSpec{ + NodeName: "testnode", + InitContainers: []v1.Container{ + { + Name: "ic1", + Image: "ic1:latest", + Ports: []v1.ContainerPort{ + { + Name: "initport", + ContainerPort: 1111, + Protocol: v1.ProtocolTCP, + }, + }, + }, + { + Name: "ic2", + Image: "ic2:latest", + Ports: []v1.ContainerPort{ + { + Name: "initport", + ContainerPort: 9111, + Protocol: v1.ProtocolTCP, + }, + }, + }, + }, + Containers: []v1.Container{ + { + Name: "c1", + Image: "c1:latest", + Ports: []v1.ContainerPort{ + { + Name: "mainport", + ContainerPort: 9000, + Protocol: v1.ProtocolTCP, + }, + }, + }, + }, + }, + Status: v1.PodStatus{ + HostIP: "2.3.4.5", + PodIP: "4.3.2.1", + }, + }, + } + + n, _ := makeDiscovery(RoleEndpoint, NamespaceDiscovery{}, objs...) 
+ + k8sDiscoveryTest{ + discovery: n, + expectedMaxItems: 1, + expectedRes: map[string]*targetgroup.Group{ + "endpoints/default/testsidecar": { + Targets: []model.LabelSet{ + { + "__address__": "4.3.2.1:9000", + "__meta_kubernetes_endpoint_address_target_kind": "Pod", + "__meta_kubernetes_endpoint_address_target_name": "testpod", + "__meta_kubernetes_endpoint_port_name": "testport", + "__meta_kubernetes_endpoint_port_protocol": "TCP", + "__meta_kubernetes_endpoint_ready": "true", + "__meta_kubernetes_pod_container_image": "c1:latest", + "__meta_kubernetes_pod_container_name": "c1", + "__meta_kubernetes_pod_container_port_name": "mainport", + "__meta_kubernetes_pod_container_port_number": "9000", + "__meta_kubernetes_pod_container_port_protocol": "TCP", + "__meta_kubernetes_pod_host_ip": "2.3.4.5", + "__meta_kubernetes_pod_ip": "4.3.2.1", + "__meta_kubernetes_pod_name": "testpod", + "__meta_kubernetes_pod_node_name": "testnode", + "__meta_kubernetes_pod_phase": "", + "__meta_kubernetes_pod_ready": "unknown", + "__meta_kubernetes_pod_uid": "deadbeef", + "__meta_kubernetes_pod_container_init": "false", + }, + { + "__address__": "4.3.2.1:9111", + "__meta_kubernetes_endpoint_address_target_kind": "Pod", + "__meta_kubernetes_endpoint_address_target_name": "testpod", + "__meta_kubernetes_endpoint_port_name": "initport", + "__meta_kubernetes_endpoint_port_protocol": "TCP", + "__meta_kubernetes_endpoint_ready": "true", + "__meta_kubernetes_pod_container_image": "ic2:latest", + "__meta_kubernetes_pod_container_name": "ic2", + "__meta_kubernetes_pod_container_port_name": "initport", + "__meta_kubernetes_pod_container_port_number": "9111", + "__meta_kubernetes_pod_container_port_protocol": "TCP", + "__meta_kubernetes_pod_host_ip": "2.3.4.5", + "__meta_kubernetes_pod_ip": "4.3.2.1", + "__meta_kubernetes_pod_name": "testpod", + "__meta_kubernetes_pod_node_name": "testnode", + "__meta_kubernetes_pod_phase": "", + "__meta_kubernetes_pod_ready": "unknown", + 
"__meta_kubernetes_pod_uid": "deadbeef", + "__meta_kubernetes_pod_container_init": "true", + }, + { + "__address__": "4.3.2.1:1111", + "__meta_kubernetes_pod_container_image": "ic1:latest", + "__meta_kubernetes_pod_container_name": "ic1", + "__meta_kubernetes_pod_container_port_name": "initport", + "__meta_kubernetes_pod_container_port_number": "1111", + "__meta_kubernetes_pod_container_port_protocol": "TCP", + "__meta_kubernetes_pod_host_ip": "2.3.4.5", + "__meta_kubernetes_pod_ip": "4.3.2.1", + "__meta_kubernetes_pod_name": "testpod", + "__meta_kubernetes_pod_node_name": "testnode", + "__meta_kubernetes_pod_phase": "", + "__meta_kubernetes_pod_ready": "unknown", + "__meta_kubernetes_pod_uid": "deadbeef", + "__meta_kubernetes_pod_container_init": "true", + }, + }, + Labels: model.LabelSet{ + "__meta_kubernetes_endpoints_name": "testsidecar", + "__meta_kubernetes_namespace": "default", + }, + Source: "endpoints/default/testsidecar", + }, + }, + }.Run(t) +} diff --git a/discovery/kubernetes/endpointslice_test.go b/discovery/kubernetes/endpointslice_test.go index c7e99b0a00..f7ecf994e2 100644 --- a/discovery/kubernetes/endpointslice_test.go +++ b/discovery/kubernetes/endpointslice_test.go @@ -1199,3 +1199,165 @@ func TestEndpointSliceInfIndexersCount(t *testing.T) { }) } } + +func TestEndpointSliceDiscoverySidecarContainer(t *testing.T) { + objs := []runtime.Object{ + &v1.EndpointSlice{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testsidecar", + Namespace: "default", + }, + AddressType: v1.AddressTypeIPv4, + Ports: []v1.EndpointPort{ + { + Name: strptr("testport"), + Port: int32ptr(9000), + Protocol: protocolptr(corev1.ProtocolTCP), + }, + { + Name: strptr("initport"), + Port: int32ptr(9111), + Protocol: protocolptr(corev1.ProtocolTCP), + }, + }, + Endpoints: []v1.Endpoint{ + { + Addresses: []string{"4.3.2.1"}, + TargetRef: &corev1.ObjectReference{ + Kind: "Pod", + Name: "testpod", + Namespace: "default", + }, + }, + }, + }, + &corev1.Pod{ + ObjectMeta: 
metav1.ObjectMeta{ + Name: "testpod", + Namespace: "default", + UID: types.UID("deadbeef"), + }, + Spec: corev1.PodSpec{ + NodeName: "testnode", + InitContainers: []corev1.Container{ + { + Name: "ic1", + Image: "ic1:latest", + Ports: []corev1.ContainerPort{ + { + Name: "initport", + ContainerPort: 1111, + Protocol: corev1.ProtocolTCP, + }, + }, + }, + { + Name: "ic2", + Image: "ic2:latest", + Ports: []corev1.ContainerPort{ + { + Name: "initport", + ContainerPort: 9111, + Protocol: corev1.ProtocolTCP, + }, + }, + }, + }, + Containers: []corev1.Container{ + { + Name: "c1", + Image: "c1:latest", + Ports: []corev1.ContainerPort{ + { + Name: "mainport", + ContainerPort: 9000, + Protocol: corev1.ProtocolTCP, + }, + }, + }, + }, + }, + Status: corev1.PodStatus{ + HostIP: "2.3.4.5", + PodIP: "4.3.2.1", + }, + }, + } + + n, _ := makeDiscovery(RoleEndpointSlice, NamespaceDiscovery{}, objs...) + + k8sDiscoveryTest{ + discovery: n, + expectedMaxItems: 1, + expectedRes: map[string]*targetgroup.Group{ + "endpointslice/default/testsidecar": { + Targets: []model.LabelSet{ + { + "__address__": "4.3.2.1:9000", + "__meta_kubernetes_endpointslice_address_target_kind": "Pod", + "__meta_kubernetes_endpointslice_address_target_name": "testpod", + "__meta_kubernetes_endpointslice_port": "9000", + "__meta_kubernetes_endpointslice_port_name": "testport", + "__meta_kubernetes_endpointslice_port_protocol": "TCP", + "__meta_kubernetes_pod_container_image": "c1:latest", + "__meta_kubernetes_pod_container_name": "c1", + "__meta_kubernetes_pod_container_port_name": "mainport", + "__meta_kubernetes_pod_container_port_number": "9000", + "__meta_kubernetes_pod_container_port_protocol": "TCP", + "__meta_kubernetes_pod_host_ip": "2.3.4.5", + "__meta_kubernetes_pod_ip": "4.3.2.1", + "__meta_kubernetes_pod_name": "testpod", + "__meta_kubernetes_pod_node_name": "testnode", + "__meta_kubernetes_pod_phase": "", + "__meta_kubernetes_pod_ready": "unknown", + "__meta_kubernetes_pod_uid": "deadbeef", + 
"__meta_kubernetes_pod_container_init": "false", + }, + { + "__address__": "4.3.2.1:9111", + "__meta_kubernetes_endpointslice_address_target_kind": "Pod", + "__meta_kubernetes_endpointslice_address_target_name": "testpod", + "__meta_kubernetes_endpointslice_port": "9111", + "__meta_kubernetes_endpointslice_port_name": "initport", + "__meta_kubernetes_endpointslice_port_protocol": "TCP", + "__meta_kubernetes_pod_container_image": "ic2:latest", + "__meta_kubernetes_pod_container_name": "ic2", + "__meta_kubernetes_pod_container_port_name": "initport", + "__meta_kubernetes_pod_container_port_number": "9111", + "__meta_kubernetes_pod_container_port_protocol": "TCP", + "__meta_kubernetes_pod_host_ip": "2.3.4.5", + "__meta_kubernetes_pod_ip": "4.3.2.1", + "__meta_kubernetes_pod_name": "testpod", + "__meta_kubernetes_pod_node_name": "testnode", + "__meta_kubernetes_pod_phase": "", + "__meta_kubernetes_pod_ready": "unknown", + "__meta_kubernetes_pod_uid": "deadbeef", + "__meta_kubernetes_pod_container_init": "true", + }, + { + "__address__": "4.3.2.1:1111", + "__meta_kubernetes_pod_container_image": "ic1:latest", + "__meta_kubernetes_pod_container_name": "ic1", + "__meta_kubernetes_pod_container_port_name": "initport", + "__meta_kubernetes_pod_container_port_number": "1111", + "__meta_kubernetes_pod_container_port_protocol": "TCP", + "__meta_kubernetes_pod_host_ip": "2.3.4.5", + "__meta_kubernetes_pod_ip": "4.3.2.1", + "__meta_kubernetes_pod_name": "testpod", + "__meta_kubernetes_pod_node_name": "testnode", + "__meta_kubernetes_pod_phase": "", + "__meta_kubernetes_pod_ready": "unknown", + "__meta_kubernetes_pod_uid": "deadbeef", + "__meta_kubernetes_pod_container_init": "true", + }, + }, + Labels: model.LabelSet{ + "__meta_kubernetes_endpointslice_address_type": "IPv4", + "__meta_kubernetes_endpointslice_name": "testsidecar", + "__meta_kubernetes_namespace": "default", + }, + Source: "endpointslice/default/testsidecar", + }, + }, + }.Run(t) +} From 
a10dc9298ecfe8e5b03b38f31232df83412c6ed5 Mon Sep 17 00:00:00 2001 From: bas smit Date: Wed, 18 Sep 2024 10:40:18 +0200 Subject: [PATCH 074/137] sd k8s: support sidecar containers in endpoint discovery Sidecar containers are a newish feature in k8s. They're implemented similar to init containers but actually stay running and allow you to delay startup of your application pod until the sidecar started (like init containers always do). This adds the ports of the sidecar container to the list of discovered endpoint(slice), allowing you to target those containers as well. The implementation is a copy of that of Pod discovery fixes: #14927 Signed-off-by: bas smit --- discovery/kubernetes/endpoints.go | 10 ++++++++-- discovery/kubernetes/endpointslice.go | 11 +++++++++-- 2 files changed, 17 insertions(+), 4 deletions(-) diff --git a/discovery/kubernetes/endpoints.go b/discovery/kubernetes/endpoints.go index c7a60ae6d3..542bc95edc 100644 --- a/discovery/kubernetes/endpoints.go +++ b/discovery/kubernetes/endpoints.go @@ -361,16 +361,19 @@ func (e *Endpoints) buildEndpoints(eps *apiv1.Endpoints) *targetgroup.Group { target = target.Merge(podLabels(pod)) // Attach potential container port labels matching the endpoint port. - for _, c := range pod.Spec.Containers { + containers := append(pod.Spec.Containers, pod.Spec.InitContainers...) 
+ for i, c := range containers { for _, cport := range c.Ports { if port.Port == cport.ContainerPort { ports := strconv.FormatUint(uint64(port.Port), 10) + isInit := i >= len(pod.Spec.Containers) target[podContainerNameLabel] = lv(c.Name) target[podContainerImageLabel] = lv(c.Image) target[podContainerPortNameLabel] = lv(cport.Name) target[podContainerPortNumberLabel] = lv(ports) target[podContainerPortProtocolLabel] = lv(string(port.Protocol)) + target[podContainerIsInit] = lv(strconv.FormatBool(isInit)) break } } @@ -411,7 +414,8 @@ func (e *Endpoints) buildEndpoints(eps *apiv1.Endpoints) *targetgroup.Group { continue } - for _, c := range pe.pod.Spec.Containers { + containers := append(pe.pod.Spec.Containers, pe.pod.Spec.InitContainers...) + for i, c := range containers { for _, cport := range c.Ports { hasSeenPort := func() bool { for _, eport := range pe.servicePorts { @@ -428,6 +432,7 @@ func (e *Endpoints) buildEndpoints(eps *apiv1.Endpoints) *targetgroup.Group { a := net.JoinHostPort(pe.pod.Status.PodIP, strconv.FormatUint(uint64(cport.ContainerPort), 10)) ports := strconv.FormatUint(uint64(cport.ContainerPort), 10) + isInit := i >= len(pe.pod.Spec.Containers) target := model.LabelSet{ model.AddressLabel: lv(a), podContainerNameLabel: lv(c.Name), @@ -435,6 +440,7 @@ func (e *Endpoints) buildEndpoints(eps *apiv1.Endpoints) *targetgroup.Group { podContainerPortNameLabel: lv(cport.Name), podContainerPortNumberLabel: lv(ports), podContainerPortProtocolLabel: lv(string(cport.Protocol)), + podContainerIsInit: lv(strconv.FormatBool(isInit)), } tg.Targets = append(tg.Targets, target.Merge(podLabels(pe.pod))) } diff --git a/discovery/kubernetes/endpointslice.go b/discovery/kubernetes/endpointslice.go index 2ac65ef414..1368303104 100644 --- a/discovery/kubernetes/endpointslice.go +++ b/discovery/kubernetes/endpointslice.go @@ -377,19 +377,23 @@ func (e *EndpointSlice) buildEndpointSlice(eps endpointSliceAdaptor) *targetgrou target = target.Merge(podLabels(pod)) // 
Attach potential container port labels matching the endpoint port. - for _, c := range pod.Spec.Containers { + containers := append(pod.Spec.Containers, pod.Spec.InitContainers...) + for i, c := range containers { for _, cport := range c.Ports { if port.port() == nil { continue } + if *port.port() == cport.ContainerPort { ports := strconv.FormatUint(uint64(*port.port()), 10) + isInit := i >= len(pod.Spec.Containers) target[podContainerNameLabel] = lv(c.Name) target[podContainerImageLabel] = lv(c.Image) target[podContainerPortNameLabel] = lv(cport.Name) target[podContainerPortNumberLabel] = lv(ports) target[podContainerPortProtocolLabel] = lv(string(cport.Protocol)) + target[podContainerIsInit] = lv(strconv.FormatBool(isInit)) break } } @@ -417,7 +421,8 @@ func (e *EndpointSlice) buildEndpointSlice(eps endpointSliceAdaptor) *targetgrou continue } - for _, c := range pe.pod.Spec.Containers { + containers := append(pe.pod.Spec.Containers, pe.pod.Spec.InitContainers...) + for i, c := range containers { for _, cport := range c.Ports { hasSeenPort := func() bool { for _, eport := range pe.servicePorts { @@ -437,6 +442,7 @@ func (e *EndpointSlice) buildEndpointSlice(eps endpointSliceAdaptor) *targetgrou a := net.JoinHostPort(pe.pod.Status.PodIP, strconv.FormatUint(uint64(cport.ContainerPort), 10)) ports := strconv.FormatUint(uint64(cport.ContainerPort), 10) + isInit := i >= len(pe.pod.Spec.Containers) target := model.LabelSet{ model.AddressLabel: lv(a), podContainerNameLabel: lv(c.Name), @@ -444,6 +450,7 @@ func (e *EndpointSlice) buildEndpointSlice(eps endpointSliceAdaptor) *targetgrou podContainerPortNameLabel: lv(cport.Name), podContainerPortNumberLabel: lv(ports), podContainerPortProtocolLabel: lv(string(cport.Protocol)), + podContainerIsInit: lv(strconv.FormatBool(isInit)), } tg.Targets = append(tg.Targets, target.Merge(podLabels(pe.pod))) } From 73997289c3b2d4b2ec234e4e16541559a9b90f6e Mon Sep 17 00:00:00 2001 From: bas smit Date: Wed, 18 Sep 2024 23:21:31 +0200 
Subject: [PATCH 075/137] tests: update discovery tests with new labael Previous commit added the pod_container_init label to discovery, so all the tests need to reflect that. Signed-off-by: bas smit --- discovery/kubernetes/endpoints_test.go | 4 ++++ discovery/kubernetes/endpointslice_test.go | 3 +++ 2 files changed, 7 insertions(+) diff --git a/discovery/kubernetes/endpoints_test.go b/discovery/kubernetes/endpoints_test.go index c503448b21..4af6889602 100644 --- a/discovery/kubernetes/endpoints_test.go +++ b/discovery/kubernetes/endpoints_test.go @@ -244,6 +244,7 @@ func TestEndpointsDiscoveryAdd(t *testing.T) { "__meta_kubernetes_pod_container_port_number": "9000", "__meta_kubernetes_pod_container_port_protocol": "TCP", "__meta_kubernetes_pod_uid": "deadbeef", + "__meta_kubernetes_pod_container_init": "false", }, { "__address__": "1.2.3.4:9001", @@ -259,6 +260,7 @@ func TestEndpointsDiscoveryAdd(t *testing.T) { "__meta_kubernetes_pod_container_port_number": "9001", "__meta_kubernetes_pod_container_port_protocol": "TCP", "__meta_kubernetes_pod_uid": "deadbeef", + "__meta_kubernetes_pod_container_init": "false", }, }, Labels: model.LabelSet{ @@ -821,6 +823,7 @@ func TestEndpointsDiscoveryNamespaces(t *testing.T) { "__meta_kubernetes_pod_container_port_number": "9000", "__meta_kubernetes_pod_container_port_protocol": "TCP", "__meta_kubernetes_pod_uid": "deadbeef", + "__meta_kubernetes_pod_container_init": "false", }, }, Labels: model.LabelSet{ @@ -1078,6 +1081,7 @@ func TestEndpointsDiscoveryUpdatePod(t *testing.T) { "__meta_kubernetes_pod_container_port_number": "9000", "__meta_kubernetes_pod_container_port_protocol": "TCP", "__meta_kubernetes_pod_uid": "deadbeef", + "__meta_kubernetes_pod_container_init": "false", }, }, Labels: model.LabelSet{ diff --git a/discovery/kubernetes/endpointslice_test.go b/discovery/kubernetes/endpointslice_test.go index f7ecf994e2..cc92c7ddaa 100644 --- a/discovery/kubernetes/endpointslice_test.go +++ 
b/discovery/kubernetes/endpointslice_test.go @@ -291,6 +291,7 @@ func TestEndpointSliceDiscoveryAdd(t *testing.T) { "__meta_kubernetes_pod_phase": "", "__meta_kubernetes_pod_ready": "unknown", "__meta_kubernetes_pod_uid": "deadbeef", + "__meta_kubernetes_pod_container_init": "false", }, { "__address__": "1.2.3.4:9001", @@ -306,6 +307,7 @@ func TestEndpointSliceDiscoveryAdd(t *testing.T) { "__meta_kubernetes_pod_phase": "", "__meta_kubernetes_pod_ready": "unknown", "__meta_kubernetes_pod_uid": "deadbeef", + "__meta_kubernetes_pod_container_init": "false", }, }, Labels: model.LabelSet{ @@ -986,6 +988,7 @@ func TestEndpointSliceDiscoveryNamespaces(t *testing.T) { "__meta_kubernetes_pod_phase": "", "__meta_kubernetes_pod_ready": "unknown", "__meta_kubernetes_pod_uid": "deadbeef", + "__meta_kubernetes_pod_container_init": "false", }, }, Labels: model.LabelSet{ From 77d3b3aff3817ec27013a094dd0a5aab64626eed Mon Sep 17 00:00:00 2001 From: Jesus Vazquez Date: Tue, 1 Oct 2024 14:36:52 +0200 Subject: [PATCH 076/137] OTLP: Remove experimental word form OTLP receiver (#14894) The OTLP receiver can now considered stable. We've had it for longer than a year in main and has received constant improvements. 
Signed-off-by: Jesus Vazquez --- cmd/prometheus/main.go | 8 ++++---- docs/command-line/prometheus.md | 3 ++- docs/feature_flags.md | 8 -------- docs/querying/api.md | 4 ++-- web/api/v1/api.go | 2 +- 5 files changed, 9 insertions(+), 16 deletions(-) diff --git a/cmd/prometheus/main.go b/cmd/prometheus/main.go index f39eba3c31..d8369770bc 100644 --- a/cmd/prometheus/main.go +++ b/cmd/prometheus/main.go @@ -182,9 +182,6 @@ func (c *flagConfig) setFeatureListOptions(logger log.Logger) error { opts := strings.Split(f, ",") for _, o := range opts { switch o { - case "otlp-write-receiver": - c.web.EnableOTLPWriteReceiver = true - level.Info(logger).Log("msg", "Experimental OTLP write receiver enabled") case "expand-external-labels": c.enableExpandExternalLabels = true level.Info(logger).Log("msg", "Experimental expand-external-labels enabled") @@ -345,6 +342,9 @@ func main() { a.Flag("web.remote-write-receiver.accepted-protobuf-messages", fmt.Sprintf("List of the remote write protobuf messages to accept when receiving the remote writes. Supported values: %v", supportedRemoteWriteProtoMsgs.String())). Default(supportedRemoteWriteProtoMsgs.Strings()...).SetValue(rwProtoMsgFlagValue(&cfg.web.AcceptRemoteWriteProtoMsgs)) + a.Flag("web.enable-otlp-receiver", "Enable API endpoint accepting OTLP write requests."). + Default("false").BoolVar(&cfg.web.EnableOTLPWriteReceiver) + a.Flag("web.console.templates", "Path to the console template directory, available at /consoles."). Default("consoles").StringVar(&cfg.web.ConsoleTemplatesPath) @@ -475,7 +475,7 @@ func main() { a.Flag("scrape.discovery-reload-interval", "Interval used by scrape manager to throttle target groups updates."). Hidden().Default("5s").SetValue(&cfg.scrape.DiscoveryReloadInterval) - a.Flag("enable-feature", "Comma separated feature names to enable. 
Valid options: auto-gomemlimit, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, extra-scrape-metrics, auto-gomaxprocs, native-histograms, otlp-write-receiver, created-timestamp-zero-ingestion, concurrent-rule-eval, delayed-compaction, old-ui. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details."). + a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: auto-gomemlimit, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, extra-scrape-metrics, auto-gomaxprocs, native-histograms, created-timestamp-zero-ingestion, concurrent-rule-eval, delayed-compaction, old-ui. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details."). Default("").StringsVar(&cfg.featureList) a.Flag("agent", "Run Prometheus in 'Agent mode'.").BoolVar(&agentMode) diff --git a/docs/command-line/prometheus.md b/docs/command-line/prometheus.md index eacb45ad07..a179a2f9f1 100644 --- a/docs/command-line/prometheus.md +++ b/docs/command-line/prometheus.md @@ -29,6 +29,7 @@ The Prometheus monitoring server | --web.enable-admin-api | Enable API endpoints for admin control actions. | `false` | | --web.enable-remote-write-receiver | Enable API endpoint accepting remote write requests. | `false` | | --web.remote-write-receiver.accepted-protobuf-messages | List of the remote write protobuf messages to accept when receiving the remote writes. Supported values: prometheus.WriteRequest, io.prometheus.write.v2.Request | `prometheus.WriteRequest` | +| --web.enable-otlp-receiver | Enable API endpoint accepting OTLP write requests. | `false` | | --web.console.templates | Path to the console template directory, available at /consoles. | `consoles` | | --web.console.libraries | Path to the console library directory. 
| `console_libraries` | | --web.page-title | Document title of Prometheus instance. | `Prometheus Time Series Collection and Processing Server` | @@ -57,7 +58,7 @@ The Prometheus monitoring server | --query.timeout | Maximum time a query may take before being aborted. Use with server mode only. | `2m` | | --query.max-concurrency | Maximum number of queries executed concurrently. Use with server mode only. | `20` | | --query.max-samples | Maximum number of samples a single query can load into memory. Note that queries will fail if they try to load more samples than this into memory, so this also limits the number of samples a query can return. Use with server mode only. | `50000000` | -| --enable-feature ... | Comma separated feature names to enable. Valid options: auto-gomemlimit, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, extra-scrape-metrics, auto-gomaxprocs, native-histograms, otlp-write-receiver, created-timestamp-zero-ingestion, concurrent-rule-eval, delayed-compaction, old-ui. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details. | | +| --enable-feature ... | Comma separated feature names to enable. Valid options: auto-gomemlimit, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, extra-scrape-metrics, auto-gomaxprocs, native-histograms, created-timestamp-zero-ingestion, concurrent-rule-eval, delayed-compaction, old-ui. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details. | | | --agent | Run Prometheus in 'Agent mode'. | | | --log.level | Only log messages with the given severity or above. One of: [debug, info, warn, error] | `info` | | --log.format | Output format of log messages. 
One of: [logfmt, json] | `logfmt` | diff --git a/docs/feature_flags.md b/docs/feature_flags.md index 1e9455a3fd..a3e2c0b9e9 100644 --- a/docs/feature_flags.md +++ b/docs/feature_flags.md @@ -147,14 +147,6 @@ This should **only** be applied to metrics that currently produce such labels. regex: (\d+)\.0+;.*_bucket ``` -## OTLP Receiver - -`--enable-feature=otlp-write-receiver` - -The OTLP receiver allows Prometheus to accept [OpenTelemetry](https://opentelemetry.io/) metrics writes. -Prometheus is best used as a Pull based system, and staleness, `up` metric, and other Pull enabled features -won't work when you push OTLP metrics. - ## Experimental PromQL functions `--enable-feature=promql-experimental-functions` diff --git a/docs/querying/api.md b/docs/querying/api.md index e32c8ecaf5..714438398b 100644 --- a/docs/querying/api.md +++ b/docs/querying/api.md @@ -1388,8 +1388,8 @@ is not considered an efficient way of ingesting samples. Use it with caution for specific low-volume use cases. It is not suitable for replacing the ingestion via scraping. -Enable the OTLP receiver by the feature flag -`--enable-feature=otlp-write-receiver`. When enabled, the OTLP receiver +Enable the OTLP receiver by setting +`--web.enable-otlp-receiver`. When enabled, the OTLP receiver endpoint is `/api/v1/otlp/v1/metrics`. 
*New in v2.47* diff --git a/web/api/v1/api.go b/web/api/v1/api.go index d3cc7d718d..0279f727f1 100644 --- a/web/api/v1/api.go +++ b/web/api/v1/api.go @@ -1750,7 +1750,7 @@ func (api *API) otlpWrite(w http.ResponseWriter, r *http.Request) { if api.otlpWriteHandler != nil { api.otlpWriteHandler.ServeHTTP(w, r) } else { - http.Error(w, "otlp write receiver needs to be enabled with --enable-feature=otlp-write-receiver", http.StatusNotFound) + http.Error(w, "otlp write receiver needs to be enabled with --web.enable-otlp-receiver", http.StatusNotFound) } } From c5c2566b8afcb77ec559d64afe10d2f7daa18236 Mon Sep 17 00:00:00 2001 From: Jesus Vazquez Date: Tue, 1 Oct 2024 15:15:21 +0200 Subject: [PATCH 077/137] MAINTAINERS: Add Arthur as an otlptranslator maintainer (#15024) Signed-off-by: Jesus Vazquez --- MAINTAINERS.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/MAINTAINERS.md b/MAINTAINERS.md index 44c07f0633..de3f3c73b7 100644 --- a/MAINTAINERS.md +++ b/MAINTAINERS.md @@ -16,7 +16,7 @@ Maintainers for specific parts of the codebase: George Krajcsovits ( / @krajorama) * `storage` * `remote`: Callum Styan ( / @cstyan), Bartłomiej Płotka ( / @bwplotka), Tom Wilkie (tom.wilkie@gmail.com / @tomwilkie), Nicolás Pazos ( / @npazosmendez), Alex Greenbank ( / @alexgreenbank) - * `otlptranslator`: Arve Knudsen ( / @aknuds1), Jesús Vázquez ( / @jesusvazquez) + * `otlptranslator`: Arthur Silva Sens ( / @ArthurSens), Arve Knudsen ( / @aknuds1), Jesús Vázquez ( / @jesusvazquez) * `tsdb`: Ganesh Vernekar ( / @codesome), Bartłomiej Płotka ( / @bwplotka), Jesús Vázquez ( / @jesusvazquez) * `web` * `ui`: Julius Volz ( / @juliusv) From 4cb5f23c35a5ccfc691485d9db69aeca16d6a59f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bj=C3=B6rn=20Rabenstein?= Date: Tue, 1 Oct 2024 18:03:46 +0200 Subject: [PATCH 078/137] api: Improve doc comments for v1.MinTime and v1.MaxTime (#14986) api: Improve doc comments for v1.MinTime and v1.MaxTime While investigated something mostly unrelated, I 
got nerd-sniped by the calculation of v1.MinTime and v1.MaxTime. The seemingly magic number in there (62135596801) needed an explanation. While looking for it, I found out that the offsets used here are actually needlessly conservative. Since the timestamps are so far in the past or future, respectively, that there is no practical impact, except that the calculation is needlessly obfuscated. However, we won't change the values now to not cause any confusion for users of this code. Still, I think the doc comment should explain the circumstances so nobody gets nerd-sniped again as I did today. For the record: 62135596800 is the difference in seconds between 0001-01-01 00:00:00 (Unix time zero point) and 1971-01-01 00:00:00 (Go time zero point) in the Gregorian calendar. If "Prometheus time" were in seconds (not milliseconds), that difference would be relevant to prevent over-/underflow when converting from "Prometheus time" to "Go time". Signed-off-by: beorn7 --------- Signed-off-by: beorn7 --- web/api/v1/api.go | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/web/api/v1/api.go b/web/api/v1/api.go index 0279f727f1..46666af90c 100644 --- a/web/api/v1/api.go +++ b/web/api/v1/api.go @@ -834,12 +834,22 @@ func (api *API) labelValues(r *http.Request) (result apiFuncResult) { } var ( - // MinTime is the default timestamp used for the begin of optional time ranges. - // Exposed to let downstream projects to reference it. + // MinTime is the default timestamp used for the start of optional time ranges. + // Exposed to let downstream projects reference it. + // + // Historical note: This should just be time.Unix(math.MinInt64/1000, 0).UTC(), + // but it was set to a higher value in the past due to a misunderstanding. + // The value is still low enough for practical purposes, so we don't want + // to change it now, avoiding confusion for importers of this variable. 
MinTime = time.Unix(math.MinInt64/1000+62135596801, 0).UTC() // MaxTime is the default timestamp used for the end of optional time ranges. // Exposed to let downstream projects to reference it. + // + // Historical note: This should just be time.Unix(math.MaxInt64/1000, 0).UTC(), + // but it was set to a lower value in the past due to a misunderstanding. + // The value is still high enough for practical purposes, so we don't want + // to change it now, avoiding confusion for importers of this variable. MaxTime = time.Unix(math.MaxInt64/1000-62135596801, 999999999).UTC() minTimeFormatted = MinTime.Format(time.RFC3339Nano) From 06e7dd609243bd6d954372704de78986134442f1 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 1 Oct 2024 23:16:14 +0000 Subject: [PATCH 079/137] Bump github.com/prometheus/common Bumps [github.com/prometheus/common](https://github.com/prometheus/common) from 0.57.0 to 0.60.0. - [Release notes](https://github.com/prometheus/common/releases) - [Changelog](https://github.com/prometheus/common/blob/main/RELEASE.md) - [Commits](https://github.com/prometheus/common/compare/v0.57.0...v0.60.0) --- updated-dependencies: - dependency-name: github.com/prometheus/common dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- documentation/examples/remote_storage/go.mod | 14 ++++---- documentation/examples/remote_storage/go.sum | 36 ++++++++++---------- 2 files changed, 25 insertions(+), 25 deletions(-) diff --git a/documentation/examples/remote_storage/go.mod b/documentation/examples/remote_storage/go.mod index 8ed5084d91..a1be5c9b4e 100644 --- a/documentation/examples/remote_storage/go.mod +++ b/documentation/examples/remote_storage/go.mod @@ -8,8 +8,8 @@ require ( github.com/gogo/protobuf v1.3.2 github.com/golang/snappy v0.0.4 github.com/influxdata/influxdb v1.11.6 - github.com/prometheus/client_golang v1.20.2 - github.com/prometheus/common v0.57.0 + github.com/prometheus/client_golang v1.20.4 + github.com/prometheus/common v0.60.0 github.com/prometheus/prometheus v0.53.1 github.com/stretchr/testify v1.9.0 ) @@ -55,11 +55,11 @@ require ( go.opentelemetry.io/otel/trace v1.27.0 // indirect go.uber.org/atomic v1.11.0 // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/crypto v0.25.0 // indirect - golang.org/x/net v0.27.0 // indirect - golang.org/x/oauth2 v0.21.0 // indirect - golang.org/x/sys v0.22.0 // indirect - golang.org/x/text v0.16.0 // indirect + golang.org/x/crypto v0.27.0 // indirect + golang.org/x/net v0.29.0 // indirect + golang.org/x/oauth2 v0.23.0 // indirect + golang.org/x/sys v0.25.0 // indirect + golang.org/x/text v0.18.0 // indirect golang.org/x/time v0.5.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 // indirect google.golang.org/grpc v1.65.0 // indirect diff --git a/documentation/examples/remote_storage/go.sum b/documentation/examples/remote_storage/go.sum index 1abeff7eb1..936b448d84 100644 --- a/documentation/examples/remote_storage/go.sum +++ b/documentation/examples/remote_storage/go.sum @@ -253,8 +253,8 @@ github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXP github.com/prometheus/client_golang v1.0.0/go.mod 
h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.20.2 h1:5ctymQzZlyOON1666svgwn3s6IKWgfbjsejTMiXIyjg= -github.com/prometheus/client_golang v1.20.2/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= +github.com/prometheus/client_golang v1.20.4 h1:Tgh3Yr67PaOv/uTqloMsCEdeuFTatm5zIq5+qNN23vI= +github.com/prometheus/client_golang v1.20.4/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -264,8 +264,8 @@ github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y8 github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.57.0 h1:Ro/rKjwdq9mZn1K5QPctzh+MA4Lp0BuYk5ZZEVhoNcY= -github.com/prometheus/common v0.57.0/go.mod h1:7uRPFSUTbfZWsJ7MHY56sqt7hLQu3bxXHDnNhl8E9qI= +github.com/prometheus/common v0.60.0 h1:+V9PAREWNvJMAuJ1x1BaWl9dewMW4YrHZQbx0sJNllA= +github.com/prometheus/common v0.60.0/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw= github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4= github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod 
h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= @@ -323,8 +323,8 @@ golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30= -golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M= +golang.org/x/crypto v0.27.0 h1:GXm2NjJrPaiv/h1tb2UH8QfgC/hOf/+z0p6PT8o1w7A= +golang.org/x/crypto v0.27.0/go.mod h1:1Xngt8kV6Dvbssa53Ziq6Eqn0HqbZi5Z6R0ZpwQzt70= golang.org/x/exp v0.0.0-20240119083558-1b970713d09a h1:Q8/wZp0KX97QFTc2ywcOE0YRjZPVIx+MXInMzdvQqcA= golang.org/x/exp v0.0.0-20240119083558-1b970713d09a/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= @@ -344,20 +344,20 @@ golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.27.0 h1:5K3Njcw06/l2y9vpGCSdcxWOYHOUk3dVNGDXN+FvAys= -golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE= +golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo= +golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod 
h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs= -golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= +golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= -golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= +golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -373,17 +373,17 @@ golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI= -golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= +golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.22.0 h1:BbsgPEJULsl2fV/AT3v15Mjva5yXKQDyKf+TbDz7QJk= -golang.org/x/term v0.22.0/go.mod h1:F3qCibpT5AMpCRfhfT53vVJwhLtIVHhB9XDjfFvnMI4= +golang.org/x/term v0.24.0 h1:Mh5cbb+Zk2hqqXNO7S1iTjEphVL+jb8ZWaqh/g+JWkM= +golang.org/x/term v0.24.0/go.mod h1:lOBK/LVxemqiMij05LGJ0tzNr8xlmwBRJ81PX6wVLH8= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= -golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= +golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224= +golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= From 6c90ed3af792626ad93c12f2778806ed0998a464 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 1 Oct 2024 23:17:10 +0000 Subject: [PATCH 080/137] Bump the go-opentelemetry-io group with 9 updates Bumps the go-opentelemetry-io group with 9 updates: | Package | 
From | To | | --- | --- | --- | | [go.opentelemetry.io/collector/pdata](https://github.com/open-telemetry/opentelemetry-collector) | `1.14.1` | `1.16.0` | | [go.opentelemetry.io/collector/semconv](https://github.com/open-telemetry/opentelemetry-collector) | `0.108.1` | `0.110.0` | | [go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp](https://github.com/open-telemetry/opentelemetry-go-contrib) | `0.53.0` | `0.55.0` | | [go.opentelemetry.io/otel](https://github.com/open-telemetry/opentelemetry-go) | `1.29.0` | `1.30.0` | | [go.opentelemetry.io/otel/exporters/otlp/otlptrace](https://github.com/open-telemetry/opentelemetry-go) | `1.29.0` | `1.30.0` | | [go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc](https://github.com/open-telemetry/opentelemetry-go) | `1.29.0` | `1.30.0` | | [go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp](https://github.com/open-telemetry/opentelemetry-go) | `1.29.0` | `1.30.0` | | [go.opentelemetry.io/otel/sdk](https://github.com/open-telemetry/opentelemetry-go) | `1.29.0` | `1.30.0` | | [go.opentelemetry.io/otel/trace](https://github.com/open-telemetry/opentelemetry-go) | `1.29.0` | `1.30.0` | Updates `go.opentelemetry.io/collector/pdata` from 1.14.1 to 1.16.0 - [Release notes](https://github.com/open-telemetry/opentelemetry-collector/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-collector/blob/main/CHANGELOG-API.md) - [Commits](https://github.com/open-telemetry/opentelemetry-collector/compare/pdata/v1.14.1...pdata/v1.16.0) Updates `go.opentelemetry.io/collector/semconv` from 0.108.1 to 0.110.0 - [Release notes](https://github.com/open-telemetry/opentelemetry-collector/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-collector/blob/main/CHANGELOG-API.md) - [Commits](https://github.com/open-telemetry/opentelemetry-collector/compare/v0.108.1...v0.110.0) Updates `go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp` from 0.53.0 to 0.55.0 - [Release 
notes](https://github.com/open-telemetry/opentelemetry-go-contrib/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-go-contrib/blob/main/CHANGELOG.md) - [Commits](https://github.com/open-telemetry/opentelemetry-go-contrib/compare/zpages/v0.53.0...zpages/v0.55.0) Updates `go.opentelemetry.io/otel` from 1.29.0 to 1.30.0 - [Release notes](https://github.com/open-telemetry/opentelemetry-go/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-go/blob/main/CHANGELOG.md) - [Commits](https://github.com/open-telemetry/opentelemetry-go/compare/v1.29.0...v1.30.0) Updates `go.opentelemetry.io/otel/exporters/otlp/otlptrace` from 1.29.0 to 1.30.0 - [Release notes](https://github.com/open-telemetry/opentelemetry-go/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-go/blob/main/CHANGELOG.md) - [Commits](https://github.com/open-telemetry/opentelemetry-go/compare/v1.29.0...v1.30.0) Updates `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` from 1.29.0 to 1.30.0 - [Release notes](https://github.com/open-telemetry/opentelemetry-go/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-go/blob/main/CHANGELOG.md) - [Commits](https://github.com/open-telemetry/opentelemetry-go/compare/v1.29.0...v1.30.0) Updates `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` from 1.29.0 to 1.30.0 - [Release notes](https://github.com/open-telemetry/opentelemetry-go/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-go/blob/main/CHANGELOG.md) - [Commits](https://github.com/open-telemetry/opentelemetry-go/compare/v1.29.0...v1.30.0) Updates `go.opentelemetry.io/otel/sdk` from 1.29.0 to 1.30.0 - [Release notes](https://github.com/open-telemetry/opentelemetry-go/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-go/blob/main/CHANGELOG.md) - [Commits](https://github.com/open-telemetry/opentelemetry-go/compare/v1.29.0...v1.30.0) Updates 
`go.opentelemetry.io/otel/trace` from 1.29.0 to 1.30.0 - [Release notes](https://github.com/open-telemetry/opentelemetry-go/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-go/blob/main/CHANGELOG.md) - [Commits](https://github.com/open-telemetry/opentelemetry-go/compare/v1.29.0...v1.30.0) --- updated-dependencies: - dependency-name: go.opentelemetry.io/collector/pdata dependency-type: direct:production update-type: version-update:semver-minor dependency-group: go-opentelemetry-io - dependency-name: go.opentelemetry.io/collector/semconv dependency-type: direct:production update-type: version-update:semver-minor dependency-group: go-opentelemetry-io - dependency-name: go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp dependency-type: direct:production update-type: version-update:semver-minor dependency-group: go-opentelemetry-io - dependency-name: go.opentelemetry.io/otel dependency-type: direct:production update-type: version-update:semver-minor dependency-group: go-opentelemetry-io - dependency-name: go.opentelemetry.io/otel/exporters/otlp/otlptrace dependency-type: direct:production update-type: version-update:semver-minor dependency-group: go-opentelemetry-io - dependency-name: go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc dependency-type: direct:production update-type: version-update:semver-minor dependency-group: go-opentelemetry-io - dependency-name: go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp dependency-type: direct:production update-type: version-update:semver-minor dependency-group: go-opentelemetry-io - dependency-name: go.opentelemetry.io/otel/sdk dependency-type: direct:production update-type: version-update:semver-minor dependency-group: go-opentelemetry-io - dependency-name: go.opentelemetry.io/otel/trace dependency-type: direct:production update-type: version-update:semver-minor dependency-group: go-opentelemetry-io ... 
Signed-off-by: dependabot[bot] --- go.mod | 30 ++++++++++++++--------------- go.sum | 60 +++++++++++++++++++++++++++++----------------------------- 2 files changed, 45 insertions(+), 45 deletions(-) diff --git a/go.mod b/go.mod index c3f6bbe749..4d0c98719b 100644 --- a/go.mod +++ b/go.mod @@ -62,15 +62,15 @@ require ( github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c github.com/stretchr/testify v1.9.0 github.com/vultr/govultr/v2 v2.17.2 - go.opentelemetry.io/collector/pdata v1.14.1 - go.opentelemetry.io/collector/semconv v0.108.1 - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 - go.opentelemetry.io/otel v1.29.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.29.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.29.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.29.0 - go.opentelemetry.io/otel/sdk v1.29.0 - go.opentelemetry.io/otel/trace v1.29.0 + go.opentelemetry.io/collector/pdata v1.16.0 + go.opentelemetry.io/collector/semconv v0.110.0 + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0 + go.opentelemetry.io/otel v1.30.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.30.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.30.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.30.0 + go.opentelemetry.io/otel/sdk v1.30.0 + go.opentelemetry.io/otel/trace v1.30.0 go.uber.org/atomic v1.11.0 go.uber.org/automaxprocs v1.5.3 go.uber.org/goleak v1.3.0 @@ -82,8 +82,8 @@ require ( golang.org/x/time v0.6.0 golang.org/x/tools v0.24.0 google.golang.org/api v0.195.0 - google.golang.org/genproto/googleapis/api v0.0.0-20240827150818-7e3bb234dfed - google.golang.org/grpc v1.66.0 + google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 + google.golang.org/grpc v1.66.2 google.golang.org/protobuf v1.34.2 gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.1 @@ -188,13 +188,13 @@ require ( 
github.com/xhit/go-str2duration/v2 v2.1.0 // indirect go.mongodb.org/mongo-driver v1.14.0 // indirect go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/otel/metric v1.29.0 // indirect + go.opentelemetry.io/otel/metric v1.30.0 // indirect go.opentelemetry.io/proto/otlp v1.3.1 // indirect - golang.org/x/crypto v0.26.0 // indirect + golang.org/x/crypto v0.27.0 // indirect golang.org/x/exp v0.0.0-20240119083558-1b970713d09a // indirect golang.org/x/mod v0.20.0 // indirect - golang.org/x/net v0.28.0 // indirect - golang.org/x/term v0.23.0 // indirect + golang.org/x/net v0.29.0 // indirect + golang.org/x/term v0.24.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect diff --git a/go.sum b/go.sum index 6f31bec93f..73dafaa104 100644 --- a/go.sum +++ b/go.sum @@ -732,26 +732,26 @@ go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/collector/pdata v1.14.1 h1:wXZjtQA7Vy5HFqco+yA95ENyMQU5heBB1IxMHQf6mUk= -go.opentelemetry.io/collector/pdata v1.14.1/go.mod h1:z1dTjwwtcoXxZx2/nkHysjxMeaxe9pEmYTEr4SMNIx8= -go.opentelemetry.io/collector/semconv v0.108.1 h1:Txk9tauUnamZaxS5vlf1O0uZ4VD6nioRBR0nX8L/fU4= -go.opentelemetry.io/collector/semconv v0.108.1/go.mod h1:zCJ5njhWpejR+A40kiEoeFm1xq1uzyZwMnRNX6/D82A= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg= -go.opentelemetry.io/otel v1.29.0 h1:PdomN/Al4q/lN6iBJEN3AwPvUiHPMlt93c8bqTG5Llw= -go.opentelemetry.io/otel 
v1.29.0/go.mod h1:N/WtXPs1CNCUEx+Agz5uouwCba+i+bJGFicT8SR4NP8= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.29.0 h1:dIIDULZJpgdiHz5tXrTgKIMLkus6jEFa7x5SOKcyR7E= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.29.0/go.mod h1:jlRVBe7+Z1wyxFSUs48L6OBQZ5JwH2Hg/Vbl+t9rAgI= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.29.0 h1:nSiV3s7wiCam610XcLbYOmMfJxB9gO4uK3Xgv5gmTgg= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.29.0/go.mod h1:hKn/e/Nmd19/x1gvIHwtOwVWM+VhuITSWip3JUDghj0= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.29.0 h1:JAv0Jwtl01UFiyWZEMiJZBiTlv5A50zNs8lsthXqIio= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.29.0/go.mod h1:QNKLmUEAq2QUbPQUfvw4fmv0bgbK7UlOSFCnXyfvSNc= -go.opentelemetry.io/otel/metric v1.29.0 h1:vPf/HFWTNkPu1aYeIsc98l4ktOQaL6LeSoeV2g+8YLc= -go.opentelemetry.io/otel/metric v1.29.0/go.mod h1:auu/QWieFVWx+DmQOUMgj0F8LHWdgalxXqvp7BII/W8= -go.opentelemetry.io/otel/sdk v1.29.0 h1:vkqKjk7gwhS8VaWb0POZKmIEDimRCMsopNYnriHyryo= -go.opentelemetry.io/otel/sdk v1.29.0/go.mod h1:pM8Dx5WKnvxLCb+8lG1PRNIDxu9g9b9g59Qr7hfAAok= -go.opentelemetry.io/otel/trace v1.29.0 h1:J/8ZNK4XgR7a21DZUAsbF8pZ5Jcw1VhACmnYt39JTi4= -go.opentelemetry.io/otel/trace v1.29.0/go.mod h1:eHl3w0sp3paPkYstJOmAimxhiFXPg+MMTlEh3nsQgWQ= +go.opentelemetry.io/collector/pdata v1.16.0 h1:g02K8jlRnmQ7TQDuXpdgVL6vIxIVqr5Gbb1qIR27rto= +go.opentelemetry.io/collector/pdata v1.16.0/go.mod h1:YZZJIt2ehxosYf/Y1pbvexjNWsIGNNrzzlCTO9jC1F4= +go.opentelemetry.io/collector/semconv v0.110.0 h1:KHQnOHe3gUz0zsxe8ph9kN5OTypCFD4V+06AiBTfeNk= +go.opentelemetry.io/collector/semconv v0.110.0/go.mod h1:zCJ5njhWpejR+A40kiEoeFm1xq1uzyZwMnRNX6/D82A= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0 h1:ZIg3ZT/aQ7AfKqdwp7ECpOK6vHqquXXuyTjIO8ZdmPs= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0/go.mod h1:DQAwmETtZV00skUwgD6+0U89g80NKsJE3DCKeLLPQMI= +go.opentelemetry.io/otel v1.30.0 
h1:F2t8sK4qf1fAmY9ua4ohFS/K+FUuOPemHUIXHtktrts= +go.opentelemetry.io/otel v1.30.0/go.mod h1:tFw4Br9b7fOS+uEao81PJjVMjW/5fvNCbpsDIXqP0pc= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.30.0 h1:lsInsfvhVIfOI6qHVyysXMNDnjO9Npvl7tlDPJFBVd4= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.30.0/go.mod h1:KQsVNh4OjgjTG0G6EiNi1jVpnaeeKsKMRwbLN+f1+8M= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.30.0 h1:m0yTiGDLUvVYaTFbAvCkVYIYcvwKt3G7OLoN77NUs/8= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.30.0/go.mod h1:wBQbT4UekBfegL2nx0Xk1vBcnzyBPsIVm9hRG4fYcr4= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.30.0 h1:umZgi92IyxfXd/l4kaDhnKgY8rnN/cZcF1LKc6I8OQ8= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.30.0/go.mod h1:4lVs6obhSVRb1EW5FhOuBTyiQhtRtAnnva9vD3yRfq8= +go.opentelemetry.io/otel/metric v1.30.0 h1:4xNulvn9gjzo4hjg+wzIKG7iNFEaBMX00Qd4QIZs7+w= +go.opentelemetry.io/otel/metric v1.30.0/go.mod h1:aXTfST94tswhWEb+5QjlSqG+cZlmyXy/u8jFpor3WqQ= +go.opentelemetry.io/otel/sdk v1.30.0 h1:cHdik6irO49R5IysVhdn8oaiR9m8XluDaJAs4DfOrYE= +go.opentelemetry.io/otel/sdk v1.30.0/go.mod h1:p14X4Ok8S+sygzblytT1nqG98QG2KYKv++HE0LY/mhg= +go.opentelemetry.io/otel/trace v1.30.0 h1:7UBkkYzeg3C7kQX8VAidWh2biiQbtAKjyIML8dQ9wmc= +go.opentelemetry.io/otel/trace v1.30.0/go.mod h1:5EyKqTzzmyqB9bwtCCq6pDLktPK6fmGf/Dph+8VI02o= go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= @@ -782,8 +782,8 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= golang.org/x/crypto v0.23.0/go.mod 
h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= -golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw= -golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54= +golang.org/x/crypto v0.27.0 h1:GXm2NjJrPaiv/h1tb2UH8QfgC/hOf/+z0p6PT8o1w7A= +golang.org/x/crypto v0.27.0/go.mod h1:1Xngt8kV6Dvbssa53Ziq6Eqn0HqbZi5Z6R0ZpwQzt70= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -865,8 +865,8 @@ golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= -golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE= -golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= +golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo= +golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -963,8 +963,8 @@ golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/term v0.20.0/go.mod 
h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= -golang.org/x/term v0.23.0 h1:F6D4vR+EHoL9/sWAWgAR1H2DcHr4PareCbAaCo1RpuU= -golang.org/x/term v0.23.0/go.mod h1:DgV24QBUrK6jhZXl+20l6UWznPlwAHm1Q1mGHtydmSk= +golang.org/x/term v0.24.0 h1:Mh5cbb+Zk2hqqXNO7S1iTjEphVL+jb8ZWaqh/g+JWkM= +golang.org/x/term v0.24.0/go.mod h1:lOBK/LVxemqiMij05LGJ0tzNr8xlmwBRJ81PX6wVLH8= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1094,8 +1094,8 @@ google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1m google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto/googleapis/api v0.0.0-20240827150818-7e3bb234dfed h1:3RgNmBoI9MZhsj3QxC+AP/qQhNwpCLOvYDYYsFrhFt0= -google.golang.org/genproto/googleapis/api v0.0.0-20240827150818-7e3bb234dfed/go.mod h1:OCdP9MfskevB/rbYvHTsXTtKC+3bHWajPdoKgjcYkfo= +google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 h1:hjSy6tcFQZ171igDaN5QHOw2n6vx40juYbC/x67CEhc= +google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:qpvKtACPCQhAdu3PyQgV4l3LMXZEtft7y8QcarRsp9I= google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 h1:pPJltXNxVzT4pK9yD8vR9X75DaWYYmLGMsEvBfFQZzQ= google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= @@ -1116,8 +1116,8 @@ 
google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3Iji google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.66.0 h1:DibZuoBznOxbDQxRINckZcUvnCEvrW9pcWIE2yF9r1c= -google.golang.org/grpc v1.66.0/go.mod h1:s3/l6xSSCURdVfAnL+TqCNMyTDAGN6+lZeVxnZR128Y= +google.golang.org/grpc v1.66.2 h1:3QdXkuq3Bkh7w+ywLdLvM56cmGvQHUMZpiCzt6Rqaoo= +google.golang.org/grpc v1.66.2/go.mod h1:s3/l6xSSCURdVfAnL+TqCNMyTDAGN6+lZeVxnZR128Y= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= From 113332bbfba4020f02eae4672647b87bd63b8130 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 1 Oct 2024 23:17:42 +0000 Subject: [PATCH 081/137] Bump github.com/gophercloud/gophercloud from 1.14.0 to 1.14.1 Bumps [github.com/gophercloud/gophercloud](https://github.com/gophercloud/gophercloud) from 1.14.0 to 1.14.1. - [Release notes](https://github.com/gophercloud/gophercloud/releases) - [Changelog](https://github.com/gophercloud/gophercloud/blob/v1.14.1/CHANGELOG.md) - [Commits](https://github.com/gophercloud/gophercloud/compare/v1.14.0...v1.14.1) --- updated-dependencies: - dependency-name: github.com/gophercloud/gophercloud dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index c3f6bbe749..cfebeb52bc 100644 --- a/go.mod +++ b/go.mod @@ -33,7 +33,7 @@ require ( github.com/google/go-cmp v0.6.0 github.com/google/pprof v0.0.0-20240711041743-f6c9dda6c6da github.com/google/uuid v1.6.0 - github.com/gophercloud/gophercloud v1.14.0 + github.com/gophercloud/gophercloud v1.14.1 github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc github.com/grpc-ecosystem/grpc-gateway v1.16.0 github.com/hashicorp/consul/api v1.29.4 diff --git a/go.sum b/go.sum index 6f31bec93f..e959cc629c 100644 --- a/go.sum +++ b/go.sum @@ -334,8 +334,8 @@ github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+ github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gax-go/v2 v2.13.0 h1:yitjD5f7jQHhyDsnhKEBU52NdvvdSeGzlAnDPT0hH1s= github.com/googleapis/gax-go/v2 v2.13.0/go.mod h1:Z/fvTZXF8/uw7Xu5GuslPw+bplx6SS338j1Is2S+B7A= -github.com/gophercloud/gophercloud v1.14.0 h1:Bt9zQDhPrbd4qX7EILGmy+i7GP35cc+AAL2+wIJpUE8= -github.com/gophercloud/gophercloud v1.14.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM= +github.com/gophercloud/gophercloud v1.14.1 h1:DTCNaTVGl8/cFu58O1JwWgis9gtISAFONqpMKNg/Vpw= +github.com/gophercloud/gophercloud v1.14.1/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= From bb0382fbafdeef9b57f0b9693199bb092906defb Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 1 Oct 2024 23:17:56 +0000 Subject: [PATCH 082/137] Bump vitest from 2.0.5 to 2.1.1 in /web/ui Bumps 
[vitest](https://github.com/vitest-dev/vitest/tree/HEAD/packages/vitest) from 2.0.5 to 2.1.1. - [Release notes](https://github.com/vitest-dev/vitest/releases) - [Commits](https://github.com/vitest-dev/vitest/commits/v2.1.1/packages/vitest) --- updated-dependencies: - dependency-name: vitest dependency-type: direct:development update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- web/ui/mantine-ui/package.json | 2 +- web/ui/package-lock.json | 305 ++++++++++----------------------- 2 files changed, 87 insertions(+), 220 deletions(-) diff --git a/web/ui/mantine-ui/package.json b/web/ui/mantine-ui/package.json index aae8ba99b1..012d8bc4d5 100644 --- a/web/ui/mantine-ui/package.json +++ b/web/ui/mantine-ui/package.json @@ -68,6 +68,6 @@ "postcss-preset-mantine": "^1.17.0", "postcss-simple-vars": "^7.0.1", "vite": "^5.1.0", - "vitest": "^2.0.5" + "vitest": "^2.1.1" } } diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index 49a9074806..7f13f28103 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -82,7 +82,7 @@ "postcss-preset-mantine": "^1.17.0", "postcss-simple-vars": "^7.0.1", "vite": "^5.1.0", - "vitest": "^2.0.5" + "vitest": "^2.1.1" } }, "mantine-ui/node_modules/eslint": { @@ -3385,14 +3385,13 @@ } }, "node_modules/@vitest/expect": { - "version": "2.0.5", - "resolved": "https://registry.npmjs.org/@vitest/expect/-/expect-2.0.5.tgz", - "integrity": "sha512-yHZtwuP7JZivj65Gxoi8upUN2OzHTi3zVfjwdpu2WrvCZPLwsJ2Ey5ILIPccoW23dd/zQBlJ4/dhi7DWNyXCpA==", + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/@vitest/expect/-/expect-2.1.1.tgz", + "integrity": "sha512-YeueunS0HiHiQxk+KEOnq/QMzlUuOzbU1Go+PgAsHvvv3tUkJPm9xWt+6ITNTlzsMXUjmgm5T+U7KBPK2qQV6w==", "dev": true, - "license": "MIT", "dependencies": { - "@vitest/spy": "2.0.5", - "@vitest/utils": "2.0.5", + "@vitest/spy": "2.1.1", + "@vitest/utils": "2.1.1", "chai": "^5.1.1", "tinyrainbow": "^1.2.0" }, @@ -3400,12 +3399,38 @@ "url": 
"https://opencollective.com/vitest" } }, - "node_modules/@vitest/pretty-format": { - "version": "2.0.5", - "resolved": "https://registry.npmjs.org/@vitest/pretty-format/-/pretty-format-2.0.5.tgz", - "integrity": "sha512-h8k+1oWHfwTkyTkb9egzwNMfJAEx4veaPSnMeKbVSjp4euqGSbQlm5+6VHwTr7u4FJslVVsUG5nopCaAYdOmSQ==", + "node_modules/@vitest/mocker": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/@vitest/mocker/-/mocker-2.1.1.tgz", + "integrity": "sha512-LNN5VwOEdJqCmJ/2XJBywB11DLlkbY0ooDJW3uRX5cZyYCrc4PI/ePX0iQhE3BiEGiQmK4GE7Q/PqCkkaiPnrA==", + "dev": true, + "dependencies": { + "@vitest/spy": "^2.1.0-beta.1", + "estree-walker": "^3.0.3", + "magic-string": "^0.30.11" + }, + "funding": { + "url": "https://opencollective.com/vitest" + }, + "peerDependencies": { + "@vitest/spy": "2.1.1", + "msw": "^2.3.5", + "vite": "^5.0.0" + }, + "peerDependenciesMeta": { + "msw": { + "optional": true + }, + "vite": { + "optional": true + } + } + }, + "node_modules/@vitest/pretty-format": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/@vitest/pretty-format/-/pretty-format-2.1.1.tgz", + "integrity": "sha512-SjxPFOtuINDUW8/UkElJYQSFtnWX7tMksSGW0vfjxMneFqxVr8YJ979QpMbDW7g+BIiq88RAGDjf7en6rvLPPQ==", "dev": true, - "license": "MIT", "dependencies": { "tinyrainbow": "^1.2.0" }, @@ -3414,13 +3439,12 @@ } }, "node_modules/@vitest/runner": { - "version": "2.0.5", - "resolved": "https://registry.npmjs.org/@vitest/runner/-/runner-2.0.5.tgz", - "integrity": "sha512-TfRfZa6Bkk9ky4tW0z20WKXFEwwvWhRY+84CnSEtq4+3ZvDlJyY32oNTJtM7AW9ihW90tX/1Q78cb6FjoAs+ig==", + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/@vitest/runner/-/runner-2.1.1.tgz", + "integrity": "sha512-uTPuY6PWOYitIkLPidaY5L3t0JJITdGTSwBtwMjKzo5O6RCOEncz9PUN+0pDidX8kTHYjO0EwUIvhlGpnGpxmA==", "dev": true, - "license": "MIT", "dependencies": { - "@vitest/utils": "2.0.5", + "@vitest/utils": "2.1.1", "pathe": "^1.1.2" }, "funding": { @@ -3428,14 +3452,13 @@ } }, "node_modules/@vitest/snapshot": { 
- "version": "2.0.5", - "resolved": "https://registry.npmjs.org/@vitest/snapshot/-/snapshot-2.0.5.tgz", - "integrity": "sha512-SgCPUeDFLaM0mIUHfaArq8fD2WbaXG/zVXjRupthYfYGzc8ztbFbu6dUNOblBG7XLMR1kEhS/DNnfCZ2IhdDew==", + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/@vitest/snapshot/-/snapshot-2.1.1.tgz", + "integrity": "sha512-BnSku1WFy7r4mm96ha2FzN99AZJgpZOWrAhtQfoxjUU5YMRpq1zmHRq7a5K9/NjqonebO7iVDla+VvZS8BOWMw==", "dev": true, - "license": "MIT", "dependencies": { - "@vitest/pretty-format": "2.0.5", - "magic-string": "^0.30.10", + "@vitest/pretty-format": "2.1.1", + "magic-string": "^0.30.11", "pathe": "^1.1.2" }, "funding": { @@ -3443,11 +3466,10 @@ } }, "node_modules/@vitest/spy": { - "version": "2.0.5", - "resolved": "https://registry.npmjs.org/@vitest/spy/-/spy-2.0.5.tgz", - "integrity": "sha512-c/jdthAhvJdpfVuaexSrnawxZz6pywlTPe84LUB2m/4t3rl2fTo9NFGBG4oWgaD+FTgDDV8hJ/nibT7IfH3JfA==", + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/@vitest/spy/-/spy-2.1.1.tgz", + "integrity": "sha512-ZM39BnZ9t/xZ/nF4UwRH5il0Sw93QnZXd9NAZGRpIgj0yvVwPpLd702s/Cx955rGaMlyBQkZJ2Ir7qyY48VZ+g==", "dev": true, - "license": "MIT", "dependencies": { "tinyspy": "^3.0.0" }, @@ -3456,14 +3478,12 @@ } }, "node_modules/@vitest/utils": { - "version": "2.0.5", - "resolved": "https://registry.npmjs.org/@vitest/utils/-/utils-2.0.5.tgz", - "integrity": "sha512-d8HKbqIcya+GR67mkZbrzhS5kKhtp8dQLcmRZLGTscGVg7yImT82cIrhtn2L8+VujWcy6KZweApgNmPsTAO/UQ==", + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/@vitest/utils/-/utils-2.1.1.tgz", + "integrity": "sha512-Y6Q9TsI+qJ2CC0ZKj6VBb+T8UPz593N113nnUykqwANqhgf3QkZeHFlusgKLTqrnVHbj/XDKZcDHol+dxVT+rQ==", "dev": true, - "license": "MIT", "dependencies": { - "@vitest/pretty-format": "2.0.5", - "estree-walker": "^3.0.3", + "@vitest/pretty-format": "2.1.1", "loupe": "^3.1.1", "tinyrainbow": "^1.2.0" }, @@ -3625,7 +3645,6 @@ "resolved": "https://registry.npmjs.org/assertion-error/-/assertion-error-2.0.1.tgz", 
"integrity": "sha512-Izi8RQcffqCeNVgFigKli1ssklIbpHnCYc6AknXGYoB6grJqyeby7jv12JUQgmTAnIDnbck1uxksT4dzN3PWBA==", "dev": true, - "license": "MIT", "engines": { "node": ">=12" } @@ -3891,7 +3910,6 @@ "resolved": "https://registry.npmjs.org/cac/-/cac-6.7.14.tgz", "integrity": "sha512-b6Ilus+c3RrdDk+JhLKUAQfzzgLEPy6wcXqS7f/xe1EETvsDP6GORG7SFuOs6cID5YkqchW/LXZbX5bc8j7ZcQ==", "dev": true, - "license": "MIT", "engines": { "node": ">=8" } @@ -3953,7 +3971,6 @@ "resolved": "https://registry.npmjs.org/chai/-/chai-5.1.1.tgz", "integrity": "sha512-pT1ZgP8rPNqUgieVaEY+ryQr6Q4HXNg8Ei9UnLUrjN4IA7dvQC5JB+/kxVcPNDHyBcc/26CXPkbNzq3qwrOEKA==", "dev": true, - "license": "MIT", "dependencies": { "assertion-error": "^2.0.1", "check-error": "^2.1.1", @@ -3997,7 +4014,6 @@ "resolved": "https://registry.npmjs.org/check-error/-/check-error-2.1.1.tgz", "integrity": "sha512-OAlb+T7V4Op9OwdkjmguYRqncdlx5JiofwOAUkmTF+jNdHwzTaTs4sRAGpzLF3oOz5xAyDGrPgeIDFQmDOTiJw==", "dev": true, - "license": "MIT", "engines": { "node": ">= 16" } @@ -4286,7 +4302,6 @@ "resolved": "https://registry.npmjs.org/deep-eql/-/deep-eql-5.0.2.tgz", "integrity": "sha512-h5k/5U50IJJFpzfL6nO9jaaumfjO/f2NjK/oYB2Djzm4p9L+3T9qWpZqZ2hAbLPuuYq9wrU08WQyBTL5GbPk5Q==", "dev": true, - "license": "MIT", "engines": { "node": ">=6" } @@ -4957,7 +4972,6 @@ "resolved": "https://registry.npmjs.org/estree-walker/-/estree-walker-3.0.3.tgz", "integrity": "sha512-7RUKfXgSMMkzt6ZuXmqapOurLGPPfgj6l9uRZ7lRGolvk0y2yocc35LdcxKC5PQZdn2DMqioAQ2NoWcrTKmm6g==", "dev": true, - "license": "MIT", "dependencies": { "@types/estree": "^1.0.0" } @@ -5274,7 +5288,6 @@ "resolved": "https://registry.npmjs.org/get-func-name/-/get-func-name-2.0.2.tgz", "integrity": "sha512-8vXOvuE167CtIc3OyItco7N/dpRtBbYOsPsXCz7X/PMnlGjYjSGuZJgM1Y7mmew7BKf9BqvLX2tnOVy1BBUsxQ==", "dev": true, - "license": "MIT", "engines": { "node": "*" } @@ -7050,7 +7063,6 @@ "resolved": "https://registry.npmjs.org/loupe/-/loupe-3.1.1.tgz", "integrity": 
"sha512-edNu/8D5MKVfGVFRhFf8aAxiTM6Wumfz5XsaatSxlD3w4R1d/WEKUTydCdPGbl9K7QG/Ca3GnDV2sIKIpXRQcw==", "dev": true, - "license": "MIT", "dependencies": { "get-func-name": "^2.0.1" } @@ -7078,7 +7090,6 @@ "resolved": "https://registry.npmjs.org/magic-string/-/magic-string-0.30.11.tgz", "integrity": "sha512-+Wri9p0QHMy+545hKww7YAu5NyzF8iomPL/RQazugQ9+Ez4Ic3mERMd8ZTX5rfK944j+560ZJi8iAwgak1Ac7A==", "dev": true, - "license": "MIT", "dependencies": { "@jridgewell/sourcemap-codec": "^1.5.0" } @@ -7123,7 +7134,8 @@ "resolved": "https://registry.npmjs.org/merge-stream/-/merge-stream-2.0.0.tgz", "integrity": "sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w==", "dev": true, - "license": "MIT" + "license": "MIT", + "peer": true }, "node_modules/merge2": { "version": "1.4.1", @@ -7547,15 +7559,13 @@ "version": "1.1.2", "resolved": "https://registry.npmjs.org/pathe/-/pathe-1.1.2.tgz", "integrity": "sha512-whLdWMYL2TwI08hn8/ZqAbrVemu0LNaNNJZX73O6qaIdCTfXutsLhMkjdENX0qhsQ9uIimo4/aQOmXkoon2nDQ==", - "dev": true, - "license": "MIT" + "dev": true }, "node_modules/pathval": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/pathval/-/pathval-2.0.0.tgz", "integrity": "sha512-vE7JKRyES09KiunauX7nd2Q9/L7lhok4smP9RZTDeD4MVs72Dp2qNFVz39Nz5a0FVEW0BJR6C0DYrq6unoziZA==", "dev": true, - "license": "MIT", "engines": { "node": ">= 14.16" } @@ -8821,6 +8831,12 @@ "dev": true, "license": "MIT" }, + "node_modules/tinyexec": { + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/tinyexec/-/tinyexec-0.3.0.tgz", + "integrity": "sha512-tVGE0mVJPGb0chKhqmsoosjsS+qUnJVGJpZgsHYQcGoPlG3B51R3PouqTgEGH2Dc9jjFyOqOpix6ZHNMXp1FZg==", + "dev": true + }, "node_modules/tinypool": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/tinypool/-/tinypool-1.0.1.tgz", @@ -8836,17 +8852,15 @@ "resolved": "https://registry.npmjs.org/tinyrainbow/-/tinyrainbow-1.2.0.tgz", "integrity": 
"sha512-weEDEq7Z5eTHPDh4xjX789+fHfF+P8boiFB+0vbWzpbnbsEr/GRaohi/uMKxg8RZMXnl1ItAi/IUHWMsjDV7kQ==", "dev": true, - "license": "MIT", "engines": { "node": ">=14.0.0" } }, "node_modules/tinyspy": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/tinyspy/-/tinyspy-3.0.0.tgz", - "integrity": "sha512-q5nmENpTHgiPVd1cJDDc9cVoYN5x4vCvwT3FMilvKPKneCBZAxn2YWQjDF0UMcE9k0Cay1gBiDfTMU0g+mPMQA==", + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/tinyspy/-/tinyspy-3.0.2.tgz", + "integrity": "sha512-n1cw8k1k0x4pgA2+9XrOkFydTerNcJ1zWCO5Nn9scWHTD+5tp8dghT2x1uduQePZTZgd3Tupf+x9BxJjeJi77Q==", "dev": true, - "license": "MIT", "engines": { "node": ">=14.0.0" } @@ -9315,16 +9329,14 @@ } }, "node_modules/vite-node": { - "version": "2.0.5", - "resolved": "https://registry.npmjs.org/vite-node/-/vite-node-2.0.5.tgz", - "integrity": "sha512-LdsW4pxj0Ot69FAoXZ1yTnA9bjGohr2yNBU7QKRxpz8ITSkhuDl6h3zS/tvgz4qrNjeRnvrWeXQ8ZF7Um4W00Q==", + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/vite-node/-/vite-node-2.1.1.tgz", + "integrity": "sha512-N/mGckI1suG/5wQI35XeR9rsMsPqKXzq1CdUndzVstBj/HvyxxGctwnK6WX43NGt5L3Z5tcRf83g4TITKJhPrA==", "dev": true, - "license": "MIT", "dependencies": { "cac": "^6.7.14", - "debug": "^4.3.5", + "debug": "^4.3.6", "pathe": "^1.1.2", - "tinyrainbow": "^1.2.0", "vite": "^5.0.0" }, "bin": { @@ -9338,30 +9350,29 @@ } }, "node_modules/vitest": { - "version": "2.0.5", - "resolved": "https://registry.npmjs.org/vitest/-/vitest-2.0.5.tgz", - "integrity": "sha512-8GUxONfauuIdeSl5f9GTgVEpg5BTOlplET4WEDaeY2QBiN8wSm68vxN/tb5z405OwppfoCavnwXafiaYBC/xOA==", + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/vitest/-/vitest-2.1.1.tgz", + "integrity": "sha512-97We7/VC0e9X5zBVkvt7SGQMGrRtn3KtySFQG5fpaMlS+l62eeXRQO633AYhSTC3z7IMebnPPNjGXVGNRFlxBA==", "dev": true, - "license": "MIT", "dependencies": { - "@ampproject/remapping": "^2.3.0", - "@vitest/expect": "2.0.5", - "@vitest/pretty-format": "^2.0.5", - "@vitest/runner": "2.0.5", - 
"@vitest/snapshot": "2.0.5", - "@vitest/spy": "2.0.5", - "@vitest/utils": "2.0.5", + "@vitest/expect": "2.1.1", + "@vitest/mocker": "2.1.1", + "@vitest/pretty-format": "^2.1.1", + "@vitest/runner": "2.1.1", + "@vitest/snapshot": "2.1.1", + "@vitest/spy": "2.1.1", + "@vitest/utils": "2.1.1", "chai": "^5.1.1", - "debug": "^4.3.5", - "execa": "^8.0.1", - "magic-string": "^0.30.10", + "debug": "^4.3.6", + "magic-string": "^0.30.11", "pathe": "^1.1.2", "std-env": "^3.7.0", - "tinybench": "^2.8.0", + "tinybench": "^2.9.0", + "tinyexec": "^0.3.0", "tinypool": "^1.0.0", "tinyrainbow": "^1.2.0", "vite": "^5.0.0", - "vite-node": "2.0.5", + "vite-node": "2.1.1", "why-is-node-running": "^2.3.0" }, "bin": { @@ -9376,8 +9387,8 @@ "peerDependencies": { "@edge-runtime/vm": "*", "@types/node": "^18.0.0 || >=20.0.0", - "@vitest/browser": "2.0.5", - "@vitest/ui": "2.0.5", + "@vitest/browser": "2.1.1", + "@vitest/ui": "2.1.1", "happy-dom": "*", "jsdom": "*" }, @@ -9402,150 +9413,6 @@ } } }, - "node_modules/vitest/node_modules/execa": { - "version": "8.0.1", - "resolved": "https://registry.npmjs.org/execa/-/execa-8.0.1.tgz", - "integrity": "sha512-VyhnebXciFV2DESc+p6B+y0LjSm0krU4OgJN44qFAhBY0TJ+1V61tYD2+wHusZ6F9n5K+vl8k0sTy7PEfV4qpg==", - "dev": true, - "license": "MIT", - "dependencies": { - "cross-spawn": "^7.0.3", - "get-stream": "^8.0.1", - "human-signals": "^5.0.0", - "is-stream": "^3.0.0", - "merge-stream": "^2.0.0", - "npm-run-path": "^5.1.0", - "onetime": "^6.0.0", - "signal-exit": "^4.1.0", - "strip-final-newline": "^3.0.0" - }, - "engines": { - "node": ">=16.17" - }, - "funding": { - "url": "https://github.com/sindresorhus/execa?sponsor=1" - } - }, - "node_modules/vitest/node_modules/get-stream": { - "version": "8.0.1", - "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-8.0.1.tgz", - "integrity": "sha512-VaUJspBffn/LMCJVoMvSAdmscJyS1auj5Zulnn5UoYcY531UWmdwhRWkcGKnGU93m5HSXP9LP2usOryrBtQowA==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=16" - 
}, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/vitest/node_modules/human-signals": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/human-signals/-/human-signals-5.0.0.tgz", - "integrity": "sha512-AXcZb6vzzrFAUE61HnN4mpLqd/cSIwNQjtNWR0euPm6y0iqx3G4gOXaIDdtdDwZmhwe82LA6+zinmW4UBWVePQ==", - "dev": true, - "license": "Apache-2.0", - "engines": { - "node": ">=16.17.0" - } - }, - "node_modules/vitest/node_modules/is-stream": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-3.0.0.tgz", - "integrity": "sha512-LnQR4bZ9IADDRSkvpqMGvt/tEJWclzklNgSw48V5EAaAeDd6qGvN8ei6k5p0tvxSR171VmGyHuTiAOfxAbr8kA==", - "dev": true, - "license": "MIT", - "engines": { - "node": "^12.20.0 || ^14.13.1 || >=16.0.0" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/vitest/node_modules/mimic-fn": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-4.0.0.tgz", - "integrity": "sha512-vqiC06CuhBTUdZH+RYl8sFrL096vA45Ok5ISO6sE/Mr1jRbGH4Csnhi8f3wKVl7x8mO4Au7Ir9D3Oyv1VYMFJw==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/vitest/node_modules/npm-run-path": { - "version": "5.3.0", - "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-5.3.0.tgz", - "integrity": "sha512-ppwTtiJZq0O/ai0z7yfudtBpWIoxM8yE6nHi1X47eFR2EWORqfbu6CnPlNsjeN683eT0qG6H/Pyf9fCcvjnnnQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "path-key": "^4.0.0" - }, - "engines": { - "node": "^12.20.0 || ^14.13.1 || >=16.0.0" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/vitest/node_modules/onetime": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/onetime/-/onetime-6.0.0.tgz", - "integrity": 
"sha512-1FlR+gjXK7X+AsAHso35MnyN5KqGwJRi/31ft6x0M194ht7S+rWAvd7PHss9xSKMzE0asv1pyIHaJYq+BbacAQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "mimic-fn": "^4.0.0" - }, - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/vitest/node_modules/path-key": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/path-key/-/path-key-4.0.0.tgz", - "integrity": "sha512-haREypq7xkM7ErfgIyA0z+Bj4AGKlMSdlQE2jvJo6huWD1EdkKYV+G/T4nq0YEF2vgTT8kqMFKo1uHn950r4SQ==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/vitest/node_modules/signal-exit": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-4.1.0.tgz", - "integrity": "sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw==", - "dev": true, - "license": "ISC", - "engines": { - "node": ">=14" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - } - }, - "node_modules/vitest/node_modules/strip-final-newline": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/strip-final-newline/-/strip-final-newline-3.0.0.tgz", - "integrity": "sha512-dOESqjYr96iWYylGObzd39EuNTa5VJxyvVAEm5Jnh7KGo75V43Hk1odPQkNDyXNmUR6k+gEiDVXnjB8HJ3crXw==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, "node_modules/w3c-keyname": { "version": "2.2.8", "resolved": "https://registry.npmjs.org/w3c-keyname/-/w3c-keyname-2.2.8.tgz", From a425dbfa72490c599ea235a0ef2278e56b40c005 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 1 Oct 2024 23:18:11 +0000 Subject: [PATCH 083/137] Bump react-router-dom from 6.26.1 to 6.26.2 in /web/ui Bumps 
[react-router-dom](https://github.com/remix-run/react-router/tree/HEAD/packages/react-router-dom) from 6.26.1 to 6.26.2. - [Release notes](https://github.com/remix-run/react-router/releases) - [Changelog](https://github.com/remix-run/react-router/blob/main/packages/react-router-dom/CHANGELOG.md) - [Commits](https://github.com/remix-run/react-router/commits/react-router-dom@6.26.2/packages/react-router-dom) --- updated-dependencies: - dependency-name: react-router-dom dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- web/ui/mantine-ui/package.json | 2 +- web/ui/package-lock.json | 29 +++++++++++++---------------- 2 files changed, 14 insertions(+), 17 deletions(-) diff --git a/web/ui/mantine-ui/package.json b/web/ui/mantine-ui/package.json index aae8ba99b1..729da16a65 100644 --- a/web/ui/mantine-ui/package.json +++ b/web/ui/mantine-ui/package.json @@ -44,7 +44,7 @@ "react-dom": "^18.3.1", "react-infinite-scroll-component": "^6.1.0", "react-redux": "^9.1.2", - "react-router-dom": "^6.26.1", + "react-router-dom": "^6.26.2", "sanitize-html": "^2.13.0", "uplot": "^1.6.30", "uplot-react": "^1.2.2", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index 49a9074806..78eaaeb047 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -58,7 +58,7 @@ "react-dom": "^18.3.1", "react-infinite-scroll-component": "^6.1.0", "react-redux": "^9.1.2", - "react-router-dom": "^6.26.1", + "react-router-dom": "^6.26.2", "sanitize-html": "^2.13.0", "uplot": "^1.6.30", "uplot-react": "^1.2.2", @@ -2364,10 +2364,9 @@ } }, "node_modules/@remix-run/router": { - "version": "1.19.1", - "resolved": "https://registry.npmjs.org/@remix-run/router/-/router-1.19.1.tgz", - "integrity": "sha512-S45oynt/WH19bHbIXjtli6QmwNYvaz+vtnubvNpNDvUOoA/OWh6j1OikIP3G+v5GHdxyC6EXoChG3HgYGEUfcg==", - "license": "MIT", + "version": "1.19.2", + "resolved": "https://registry.npmjs.org/@remix-run/router/-/router-1.19.2.tgz", + 
"integrity": "sha512-baiMx18+IMuD1yyvOGaHM9QrVUPGGG0jC+z+IPHnRJWUAUvaKuWKyE8gjDj2rzv3sz9zOGoRSPgeBVHRhZnBlA==", "engines": { "node": ">=14.0.0" } @@ -8113,12 +8112,11 @@ } }, "node_modules/react-router": { - "version": "6.26.1", - "resolved": "https://registry.npmjs.org/react-router/-/react-router-6.26.1.tgz", - "integrity": "sha512-kIwJveZNwp7teQRI5QmwWo39A5bXRyqpH0COKKmPnyD2vBvDwgFXSqDUYtt1h+FEyfnE8eXr7oe0MxRzVwCcvQ==", - "license": "MIT", + "version": "6.26.2", + "resolved": "https://registry.npmjs.org/react-router/-/react-router-6.26.2.tgz", + "integrity": "sha512-tvN1iuT03kHgOFnLPfLJ8V95eijteveqdOSk+srqfePtQvqCExB8eHOYnlilbOcyJyKnYkr1vJvf7YqotAJu1A==", "dependencies": { - "@remix-run/router": "1.19.1" + "@remix-run/router": "1.19.2" }, "engines": { "node": ">=14.0.0" @@ -8128,13 +8126,12 @@ } }, "node_modules/react-router-dom": { - "version": "6.26.1", - "resolved": "https://registry.npmjs.org/react-router-dom/-/react-router-dom-6.26.1.tgz", - "integrity": "sha512-veut7m41S1fLql4pLhxeSW3jlqs+4MtjRLj0xvuCEXsxusJCbs6I8yn9BxzzDX2XDgafrccY6hwjmd/bL54tFw==", - "license": "MIT", + "version": "6.26.2", + "resolved": "https://registry.npmjs.org/react-router-dom/-/react-router-dom-6.26.2.tgz", + "integrity": "sha512-z7YkaEW0Dy35T3/QKPYB1LjMK2R1fxnHO8kWpUMTBdfVzZrWOiY9a7CtN8HqdWtDUWd5FY6Dl8HFsqVwH4uOtQ==", "dependencies": { - "@remix-run/router": "1.19.1", - "react-router": "6.26.1" + "@remix-run/router": "1.19.2", + "react-router": "6.26.2" }, "engines": { "node": ">=14.0.0" From e05ab0c8f16407e08a1cfe60d7415dd4a5226534 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 1 Oct 2024 23:18:21 +0000 Subject: [PATCH 084/137] Bump github.com/linode/linodego from 1.40.0 to 1.41.0 Bumps [github.com/linode/linodego](https://github.com/linode/linodego) from 1.40.0 to 1.41.0. 
- [Release notes](https://github.com/linode/linodego/releases) - [Commits](https://github.com/linode/linodego/compare/v1.40.0...v1.41.0) --- updated-dependencies: - dependency-name: github.com/linode/linodego dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- go.mod | 8 ++++---- go.sum | 16 ++++++++-------- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/go.mod b/go.mod index c3f6bbe749..9decdbe5aa 100644 --- a/go.mod +++ b/go.mod @@ -43,7 +43,7 @@ require ( github.com/json-iterator/go v1.1.12 github.com/klauspost/compress v1.17.9 github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b - github.com/linode/linodego v1.40.0 + github.com/linode/linodego v1.41.0 github.com/miekg/dns v1.1.62 github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f @@ -190,11 +190,11 @@ require ( go.opencensus.io v0.24.0 // indirect go.opentelemetry.io/otel/metric v1.29.0 // indirect go.opentelemetry.io/proto/otlp v1.3.1 // indirect - golang.org/x/crypto v0.26.0 // indirect + golang.org/x/crypto v0.27.0 // indirect golang.org/x/exp v0.0.0-20240119083558-1b970713d09a // indirect golang.org/x/mod v0.20.0 // indirect - golang.org/x/net v0.28.0 // indirect - golang.org/x/term v0.23.0 // indirect + golang.org/x/net v0.29.0 // indirect + golang.org/x/term v0.24.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect diff --git a/go.sum b/go.sum index 6f31bec93f..57c5590afb 100644 --- a/go.sum +++ b/go.sum @@ -472,8 +472,8 @@ github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0 github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod 
h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= -github.com/linode/linodego v1.40.0 h1:7ESY0PwK94hoggoCtIroT1Xk6b1flrFBNZ6KwqbTqlI= -github.com/linode/linodego v1.40.0/go.mod h1:NsUw4l8QrLdIofRg1NYFBbW5ZERnmbZykVBszPZLORM= +github.com/linode/linodego v1.41.0 h1:GcP7JIBr9iLRJ9FwAtb9/WCT1DuPJS/xUApapfdjtiY= +github.com/linode/linodego v1.41.0/go.mod h1:Ow4/XZ0yvWBzt3iAHwchvhSx30AyLintsSMvvQ2/SJY= github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= @@ -782,8 +782,8 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= -golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw= -golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54= +golang.org/x/crypto v0.27.0 h1:GXm2NjJrPaiv/h1tb2UH8QfgC/hOf/+z0p6PT8o1w7A= +golang.org/x/crypto v0.27.0/go.mod h1:1Xngt8kV6Dvbssa53Ziq6Eqn0HqbZi5Z6R0ZpwQzt70= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -865,8 +865,8 @@ golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net 
v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= -golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE= -golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= +golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo= +golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -963,8 +963,8 @@ golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= -golang.org/x/term v0.23.0 h1:F6D4vR+EHoL9/sWAWgAR1H2DcHr4PareCbAaCo1RpuU= -golang.org/x/term v0.23.0/go.mod h1:DgV24QBUrK6jhZXl+20l6UWznPlwAHm1Q1mGHtydmSk= +golang.org/x/term v0.24.0 h1:Mh5cbb+Zk2hqqXNO7S1iTjEphVL+jb8ZWaqh/g+JWkM= +golang.org/x/term v0.24.0/go.mod h1:lOBK/LVxemqiMij05LGJ0tzNr8xlmwBRJ81PX6wVLH8= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= From 8907583524630c13dfae2c787b5a3efdf63587ff Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 1 Oct 2024 23:18:22 +0000 Subject: [PATCH 085/137] Bump @codemirror/view from 6.33.0 to 6.34.1 in 
/web/ui Bumps [@codemirror/view](https://github.com/codemirror/view) from 6.33.0 to 6.34.1. - [Changelog](https://github.com/codemirror/view/blob/main/CHANGELOG.md) - [Commits](https://github.com/codemirror/view/compare/6.33.0...6.34.1) --- updated-dependencies: - dependency-name: "@codemirror/view" dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- web/ui/mantine-ui/package.json | 2 +- web/ui/module/codemirror-promql/package.json | 2 +- web/ui/package-lock.json | 11 +++++------ 3 files changed, 7 insertions(+), 8 deletions(-) diff --git a/web/ui/mantine-ui/package.json b/web/ui/mantine-ui/package.json index aae8ba99b1..6b65bf9bb6 100644 --- a/web/ui/mantine-ui/package.json +++ b/web/ui/mantine-ui/package.json @@ -16,7 +16,7 @@ "@codemirror/language": "^6.10.2", "@codemirror/lint": "^6.8.1", "@codemirror/state": "^6.4.1", - "@codemirror/view": "^6.33.0", + "@codemirror/view": "^6.34.1", "@floating-ui/dom": "^1.6.7", "@lezer/common": "^1.2.1", "@lezer/highlight": "^1.2.1", diff --git a/web/ui/module/codemirror-promql/package.json b/web/ui/module/codemirror-promql/package.json index 973cfca9e4..cf05c6f643 100644 --- a/web/ui/module/codemirror-promql/package.json +++ b/web/ui/module/codemirror-promql/package.json @@ -37,7 +37,7 @@ "@codemirror/language": "^6.10.2", "@codemirror/lint": "^6.8.1", "@codemirror/state": "^6.3.3", - "@codemirror/view": "^6.29.1", + "@codemirror/view": "^6.34.1", "@lezer/common": "^1.2.1", "@lezer/highlight": "^1.2.0", "@lezer/lr": "^1.4.2", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index 49a9074806..5de754b32f 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -30,7 +30,7 @@ "@codemirror/language": "^6.10.2", "@codemirror/lint": "^6.8.1", "@codemirror/state": "^6.4.1", - "@codemirror/view": "^6.33.0", + "@codemirror/view": "^6.34.1", "@floating-ui/dom": "^1.6.7", "@lezer/common": "^1.2.1", "@lezer/highlight": "^1.2.1", @@ -171,7 
+171,7 @@ "@codemirror/language": "^6.10.2", "@codemirror/lint": "^6.8.1", "@codemirror/state": "^6.3.3", - "@codemirror/view": "^6.29.1", + "@codemirror/view": "^6.34.1", "@lezer/common": "^1.2.1", "@lezer/highlight": "^1.2.0", "@lezer/lr": "^1.4.2", @@ -997,10 +997,9 @@ } }, "node_modules/@codemirror/view": { - "version": "6.33.0", - "resolved": "https://registry.npmjs.org/@codemirror/view/-/view-6.33.0.tgz", - "integrity": "sha512-AroaR3BvnjRW8fiZBalAaK+ZzB5usGgI014YKElYZvQdNH5ZIidHlO+cyf/2rWzyBFRkvG6VhiXeAEbC53P2YQ==", - "license": "MIT", + "version": "6.34.1", + "resolved": "https://registry.npmjs.org/@codemirror/view/-/view-6.34.1.tgz", + "integrity": "sha512-t1zK/l9UiRqwUNPm+pdIT0qzJlzuVckbTEMVNFhfWkGiBQClstzg+78vedCvLSX0xJEZ6lwZbPpnljL7L6iwMQ==", "dependencies": { "@codemirror/state": "^6.4.0", "style-mod": "^4.1.0", From cbb4ed0c3bb6536c18231a44ea938e975f562d85 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 1 Oct 2024 23:18:37 +0000 Subject: [PATCH 086/137] Bump @mantine/dates from 7.12.2 to 7.13.1 in /web/ui Bumps [@mantine/dates](https://github.com/mantinedev/mantine/tree/HEAD/packages/@mantine/dates) from 7.12.2 to 7.13.1. - [Release notes](https://github.com/mantinedev/mantine/releases) - [Changelog](https://github.com/mantinedev/mantine/blob/master/CHANGELOG.md) - [Commits](https://github.com/mantinedev/mantine/commits/7.13.1/packages/@mantine/dates) --- updated-dependencies: - dependency-name: "@mantine/dates" dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- web/ui/mantine-ui/package.json | 2 +- web/ui/package-lock.json | 29 +++++++++++++---------------- 2 files changed, 14 insertions(+), 17 deletions(-) diff --git a/web/ui/mantine-ui/package.json b/web/ui/mantine-ui/package.json index aae8ba99b1..900793e3ef 100644 --- a/web/ui/mantine-ui/package.json +++ b/web/ui/mantine-ui/package.json @@ -22,7 +22,7 @@ "@lezer/highlight": "^1.2.1", "@mantine/code-highlight": "^7.11.2", "@mantine/core": "^7.11.2", - "@mantine/dates": "^7.11.2", + "@mantine/dates": "^7.13.1", "@mantine/hooks": "^7.11.2", "@mantine/notifications": "^7.11.2", "@microsoft/fetch-event-source": "^2.0.1", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index 49a9074806..f64d86c5a2 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -36,7 +36,7 @@ "@lezer/highlight": "^1.2.1", "@mantine/code-highlight": "^7.11.2", "@mantine/core": "^7.11.2", - "@mantine/dates": "^7.11.2", + "@mantine/dates": "^7.13.1", "@mantine/hooks": "^7.11.2", "@mantine/notifications": "^7.11.2", "@microsoft/fetch-event-source": "^2.0.1", @@ -2188,10 +2188,9 @@ } }, "node_modules/@mantine/core": { - "version": "7.12.2", - "resolved": "https://registry.npmjs.org/@mantine/core/-/core-7.12.2.tgz", - "integrity": "sha512-FrMHOKq4s3CiPIxqZ9xnVX7H4PEGNmbtHMvWO/0YlfPgoV0Er/N/DNJOFW1ys4WSnidPTayYeB41riyxxGOpRQ==", - "license": "MIT", + "version": "7.13.1", + "resolved": "https://registry.npmjs.org/@mantine/core/-/core-7.13.1.tgz", + "integrity": "sha512-KH/WrcY/5pf3FxUUbtG77xyd7kfp6SRPAJFkxjFlg9kXroiQ7baljphY371CwPYPINERShUdvCQLpz4r4WMIHA==", "dependencies": { "@floating-ui/react": "^0.26.9", "clsx": "^2.1.1", @@ -2201,32 +2200,30 @@ "type-fest": "^4.12.0" }, "peerDependencies": { - "@mantine/hooks": "7.12.2", + "@mantine/hooks": "7.13.1", "react": "^18.2.0", "react-dom": "^18.2.0" } }, "node_modules/@mantine/dates": { - "version": "7.12.2", - "resolved": "https://registry.npmjs.org/@mantine/dates/-/dates-7.12.2.tgz", - 
"integrity": "sha512-qsDDl9qF80QLG1n6JiysyELAhbNLbV3qmXRAIU3GJLLxtZfyD9ntOUg0B64EpNl3Py4btXNo4yniFdu1JSUgwg==", - "license": "MIT", + "version": "7.13.1", + "resolved": "https://registry.npmjs.org/@mantine/dates/-/dates-7.13.1.tgz", + "integrity": "sha512-KzzAehnftPAiGhJhOaRcWBuQ5+f5HrqnpNjH2/0KN+dv3gUfitAbapXOmCYOTdzS9Zk+RqqsD5VKvsbr1giXtQ==", "dependencies": { "clsx": "^2.1.1" }, "peerDependencies": { - "@mantine/core": "7.12.2", - "@mantine/hooks": "7.12.2", + "@mantine/core": "7.13.1", + "@mantine/hooks": "7.13.1", "dayjs": ">=1.0.0", "react": "^18.2.0", "react-dom": "^18.2.0" } }, "node_modules/@mantine/hooks": { - "version": "7.12.2", - "resolved": "https://registry.npmjs.org/@mantine/hooks/-/hooks-7.12.2.tgz", - "integrity": "sha512-dVMw8jpM0hAzc8e7/GNvzkk9N0RN/m+PKycETB3H6lJGuXJJSRR4wzzgQKpEhHwPccktDpvb4rkukKDq2jA8Fg==", - "license": "MIT", + "version": "7.13.1", + "resolved": "https://registry.npmjs.org/@mantine/hooks/-/hooks-7.13.1.tgz", + "integrity": "sha512-Hfd4v380pPJUKDbARk+egdAanx7bpGZmaOn8G3QBZjwDufVopxip0WPkifUKUIMeYY1CTboa+1go9l56ZWrrSg==", "peerDependencies": { "react": "^18.2.0" } From 8f64d9144bbba0f83e9654a8b51ff9c0f1dcdd02 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 1 Oct 2024 23:19:00 +0000 Subject: [PATCH 087/137] Bump eslint-plugin-react-refresh from 0.4.11 to 0.4.12 in /web/ui Bumps [eslint-plugin-react-refresh](https://github.com/ArnaudBarre/eslint-plugin-react-refresh) from 0.4.11 to 0.4.12. - [Release notes](https://github.com/ArnaudBarre/eslint-plugin-react-refresh/releases) - [Changelog](https://github.com/ArnaudBarre/eslint-plugin-react-refresh/blob/main/CHANGELOG.md) - [Commits](https://github.com/ArnaudBarre/eslint-plugin-react-refresh/compare/v0.4.11...v0.4.12) --- updated-dependencies: - dependency-name: eslint-plugin-react-refresh dependency-type: direct:development update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- web/ui/mantine-ui/package.json | 2 +- web/ui/package-lock.json | 9 ++++----- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/web/ui/mantine-ui/package.json b/web/ui/mantine-ui/package.json index aae8ba99b1..80bdac7924 100644 --- a/web/ui/mantine-ui/package.json +++ b/web/ui/mantine-ui/package.json @@ -61,7 +61,7 @@ "@vitejs/plugin-react": "^4.2.1", "eslint": "^9.9.1", "eslint-plugin-react-hooks": "^5.1.0-rc-e56f4ae3-20240830", - "eslint-plugin-react-refresh": "^0.4.11", + "eslint-plugin-react-refresh": "^0.4.12", "globals": "^15.9.0", "jsdom": "^25.0.0", "postcss": "^8.4.35", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index 49a9074806..6bd7a365f3 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -75,7 +75,7 @@ "@vitejs/plugin-react": "^4.2.1", "eslint": "^9.9.1", "eslint-plugin-react-hooks": "^5.1.0-rc-e56f4ae3-20240830", - "eslint-plugin-react-refresh": "^0.4.11", + "eslint-plugin-react-refresh": "^0.4.12", "globals": "^15.9.0", "jsdom": "^25.0.0", "postcss": "^8.4.35", @@ -4696,11 +4696,10 @@ } }, "node_modules/eslint-plugin-react-refresh": { - "version": "0.4.11", - "resolved": "https://registry.npmjs.org/eslint-plugin-react-refresh/-/eslint-plugin-react-refresh-0.4.11.tgz", - "integrity": "sha512-wrAKxMbVr8qhXTtIKfXqAn5SAtRZt0aXxe5P23Fh4pUAdC6XEsybGLB8P0PI4j1yYqOgUEUlzKAGDfo7rJOjcw==", + "version": "0.4.12", + "resolved": "https://registry.npmjs.org/eslint-plugin-react-refresh/-/eslint-plugin-react-refresh-0.4.12.tgz", + "integrity": "sha512-9neVjoGv20FwYtCP6CB1dzR1vr57ZDNOXst21wd2xJ/cTlM2xLq0GWVlSNTdMn/4BtP6cHYBMCSp1wFBJ9jBsg==", "dev": true, - "license": "MIT", "peerDependencies": { "eslint": ">=7" } From 50ccf5cfd712397b1058867111ddade3c35a16b4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 1 Oct 2024 23:19:11 +0000 Subject: [PATCH 088/137] Bump @types/lodash from 4.17.7 to 4.17.9 in /web/ui Bumps 
[@types/lodash](https://github.com/DefinitelyTyped/DefinitelyTyped/tree/HEAD/types/lodash) from 4.17.7 to 4.17.9. - [Release notes](https://github.com/DefinitelyTyped/DefinitelyTyped/releases) - [Commits](https://github.com/DefinitelyTyped/DefinitelyTyped/commits/HEAD/types/lodash) --- updated-dependencies: - dependency-name: "@types/lodash" dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- web/ui/mantine-ui/package.json | 2 +- web/ui/package-lock.json | 9 ++++----- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/web/ui/mantine-ui/package.json b/web/ui/mantine-ui/package.json index aae8ba99b1..9be7b951a4 100644 --- a/web/ui/mantine-ui/package.json +++ b/web/ui/mantine-ui/package.json @@ -34,7 +34,7 @@ "@tanstack/react-query": "^5.22.2", "@testing-library/jest-dom": "^6.5.0", "@testing-library/react": "^16.0.1", - "@types/lodash": "^4.17.7", + "@types/lodash": "^4.17.9", "@types/sanitize-html": "^2.13.0", "@uiw/react-codemirror": "^4.23.1", "clsx": "^2.1.1", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index 49a9074806..e8b6a76b45 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -48,7 +48,7 @@ "@tanstack/react-query": "^5.22.2", "@testing-library/jest-dom": "^6.5.0", "@testing-library/react": "^16.0.1", - "@types/lodash": "^4.17.7", + "@types/lodash": "^4.17.9", "@types/sanitize-html": "^2.13.0", "@uiw/react-codemirror": "^4.23.1", "clsx": "^2.1.1", @@ -2971,10 +2971,9 @@ "license": "MIT" }, "node_modules/@types/lodash": { - "version": "4.17.7", - "resolved": "https://registry.npmjs.org/@types/lodash/-/lodash-4.17.7.tgz", - "integrity": "sha512-8wTvZawATi/lsmNu10/j2hk1KEP0IvjubqPE3cu1Xz7xfXXt5oCq3SNUz4fMIP4XGF9Ky+Ue2tBA3hcS7LSBlA==", - "license": "MIT" + "version": "4.17.9", + "resolved": "https://registry.npmjs.org/@types/lodash/-/lodash-4.17.9.tgz", + "integrity": 
"sha512-w9iWudx1XWOHW5lQRS9iKpK/XuRhnN+0T7HvdCCd802FYkT1AMTnxndJHGrNJwRoRHkslGr4S29tjm1cT7x/7w==" }, "node_modules/@types/node": { "version": "22.5.4", From 8d9850b7ffa42d5214736380b135c9d852dff451 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 1 Oct 2024 23:21:30 +0000 Subject: [PATCH 089/137] Bump @eslint/js from 9.9.1 to 9.11.1 in /web/ui Bumps [@eslint/js](https://github.com/eslint/eslint/tree/HEAD/packages/js) from 9.9.1 to 9.11.1. - [Release notes](https://github.com/eslint/eslint/releases) - [Changelog](https://github.com/eslint/eslint/blob/main/CHANGELOG.md) - [Commits](https://github.com/eslint/eslint/commits/v9.11.1/packages/js) --- updated-dependencies: - dependency-name: "@eslint/js" dependency-type: direct:development update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- web/ui/mantine-ui/package.json | 2 +- web/ui/package-lock.json | 18 +++++++++++++----- 2 files changed, 14 insertions(+), 6 deletions(-) diff --git a/web/ui/mantine-ui/package.json b/web/ui/mantine-ui/package.json index aae8ba99b1..c1083f6469 100644 --- a/web/ui/mantine-ui/package.json +++ b/web/ui/mantine-ui/package.json @@ -53,7 +53,7 @@ "devDependencies": { "@eslint/compat": "^1.1.1", "@eslint/eslintrc": "^3.1.0", - "@eslint/js": "^9.9.1", + "@eslint/js": "^9.11.1", "@types/react": "^18.3.5", "@types/react-dom": "^18.3.0", "@typescript-eslint/eslint-plugin": "^6.21.0", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index 49a9074806..9411dc851a 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -67,7 +67,7 @@ "devDependencies": { "@eslint/compat": "^1.1.1", "@eslint/eslintrc": "^3.1.0", - "@eslint/js": "^9.9.1", + "@eslint/js": "^9.11.1", "@types/react": "^18.3.5", "@types/react-dom": "^18.3.0", "@typescript-eslint/eslint-plugin": "^6.21.0", @@ -145,6 +145,15 @@ } } }, + "mantine-ui/node_modules/eslint/node_modules/@eslint/js": { + "version": "9.9.1", + 
"resolved": "https://registry.npmjs.org/@eslint/js/-/js-9.9.1.tgz", + "integrity": "sha512-xIDQRsfg5hNBqHz04H1R3scSVwmI+KUbqjsQKHKQ1DAUSaUjYPReZZmS/5PNiKu1fUvzDd6H7DEDKACSEhu+TQ==", + "dev": true, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + } + }, "mantine-ui/node_modules/globals": { "version": "15.9.0", "resolved": "https://registry.npmjs.org/globals/-/globals-15.9.0.tgz", @@ -1487,11 +1496,10 @@ } }, "node_modules/@eslint/js": { - "version": "9.9.1", - "resolved": "https://registry.npmjs.org/@eslint/js/-/js-9.9.1.tgz", - "integrity": "sha512-xIDQRsfg5hNBqHz04H1R3scSVwmI+KUbqjsQKHKQ1DAUSaUjYPReZZmS/5PNiKu1fUvzDd6H7DEDKACSEhu+TQ==", + "version": "9.11.1", + "resolved": "https://registry.npmjs.org/@eslint/js/-/js-9.11.1.tgz", + "integrity": "sha512-/qu+TWz8WwPWc7/HcIJKi+c+MOm46GdVaSlTTQcaqaL53+GsoA6MxWp5PtTx48qbSP7ylM1Kn7nhvkugfJvRSA==", "dev": true, - "license": "MIT", "engines": { "node": "^18.18.0 || ^20.9.0 || >=21.1.0" } From 83efefd35db6b7779ee32fbd39d3ed33d5b1d940 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 1 Oct 2024 23:21:44 +0000 Subject: [PATCH 090/137] Bump @types/jest from 29.5.12 to 29.5.13 in /web/ui Bumps [@types/jest](https://github.com/DefinitelyTyped/DefinitelyTyped/tree/HEAD/types/jest) from 29.5.12 to 29.5.13. - [Release notes](https://github.com/DefinitelyTyped/DefinitelyTyped/releases) - [Commits](https://github.com/DefinitelyTyped/DefinitelyTyped/commits/HEAD/types/jest) --- updated-dependencies: - dependency-name: "@types/jest" dependency-type: direct:development update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- web/ui/package-lock.json | 9 ++++----- web/ui/package.json | 2 +- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index 49a9074806..75efd90ecb 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -12,7 +12,7 @@ "module/*" ], "devDependencies": { - "@types/jest": "^29.5.12", + "@types/jest": "^29.5.13", "@typescript-eslint/eslint-plugin": "^6.21.0", "@typescript-eslint/parser": "^6.21.0", "eslint-config-prettier": "^9.1.0", @@ -2918,11 +2918,10 @@ } }, "node_modules/@types/jest": { - "version": "29.5.12", - "resolved": "https://registry.npmjs.org/@types/jest/-/jest-29.5.12.tgz", - "integrity": "sha512-eDC8bTvT/QhYdxJAulQikueigY5AsdBRH2yDKW3yveW7svY3+DzN84/2NUgkw10RTiJbWqZrTtoGVdYlvFJdLw==", + "version": "29.5.13", + "resolved": "https://registry.npmjs.org/@types/jest/-/jest-29.5.13.tgz", + "integrity": "sha512-wd+MVEZCHt23V0/L642O5APvspWply/rGY5BcW4SUETo2UzPU3Z26qr8jC2qxpimI2jjx9h7+2cj2FwIr01bXg==", "dev": true, - "license": "MIT", "dependencies": { "expect": "^29.0.0", "pretty-format": "^29.0.0" diff --git a/web/ui/package.json b/web/ui/package.json index 639ef70abf..6aa39d1a79 100644 --- a/web/ui/package.json +++ b/web/ui/package.json @@ -15,7 +15,7 @@ "module/*" ], "devDependencies": { - "@types/jest": "^29.5.12", + "@types/jest": "^29.5.13", "@typescript-eslint/eslint-plugin": "^6.21.0", "@typescript-eslint/parser": "^6.21.0", "eslint-config-prettier": "^9.1.0", From 0a61cc0363cd84009bb519340fd42071746d8ecd Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 1 Oct 2024 23:46:03 +0000 Subject: [PATCH 091/137] Bump actions/setup-node from 4.0.3 to 4.0.4 Bumps [actions/setup-node](https://github.com/actions/setup-node) from 4.0.3 to 4.0.4. 
- [Release notes](https://github.com/actions/setup-node/releases) - [Commits](https://github.com/actions/setup-node/compare/1e60f620b9541d16bece96c5465dc8ee9832be0b...0a44ba7841725637a19e28fa30b79a866c81b0a6) --- updated-dependencies: - dependency-name: actions/setup-node dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 9e614cb2db..2ef0e97a10 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -243,7 +243,7 @@ jobs: uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 - uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4 - name: Install nodejs - uses: actions/setup-node@1e60f620b9541d16bece96c5465dc8ee9832be0b # v4.0.3 + uses: actions/setup-node@0a44ba7841725637a19e28fa30b79a866c81b0a6 # v4.0.4 with: node-version-file: "web/ui/.nvmrc" registry-url: "https://registry.npmjs.org" From bc82eacd2e5a274bae18d6e5a4bdf1bfba535d6f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 1 Oct 2024 23:46:13 +0000 Subject: [PATCH 092/137] Bump actions/checkout from 4.1.7 to 4.2.0 in /scripts Bumps [actions/checkout](https://github.com/actions/checkout) from 4.1.7 to 4.2.0. - [Release notes](https://github.com/actions/checkout/releases) - [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md) - [Commits](https://github.com/actions/checkout/compare/692973e3d937129bcbf40652eb9f2f61becf3332...d632683dd7b4114ad314bca15554477dd762a938) --- updated-dependencies: - dependency-name: actions/checkout dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- scripts/golangci-lint.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/golangci-lint.yml b/scripts/golangci-lint.yml index a15cfc97f0..1c099932ba 100644 --- a/scripts/golangci-lint.yml +++ b/scripts/golangci-lint.yml @@ -24,7 +24,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout repository - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 - name: Install Go uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 with: From 886d705653f79909d1bfde24bbec3b09ff87b619 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 1 Oct 2024 23:46:14 +0000 Subject: [PATCH 093/137] Bump github/codeql-action from 3.26.6 to 3.26.10 Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.26.6 to 3.26.10. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/4dd16135b69a43b6c8efb853346f8437d92d3c93...e2b3eafc8d227b0241d48be5f425d47c2d750a13) --- updated-dependencies: - dependency-name: github/codeql-action dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- .github/workflows/codeql-analysis.yml | 6 +++--- .github/workflows/scorecards.yml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 89aa2ba29b..1466f4ec2b 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -27,12 +27,12 @@ jobs: uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 - name: Initialize CodeQL - uses: github/codeql-action/init@4dd16135b69a43b6c8efb853346f8437d92d3c93 # v3.26.6 + uses: github/codeql-action/init@e2b3eafc8d227b0241d48be5f425d47c2d750a13 # v3.26.10 with: languages: ${{ matrix.language }} - name: Autobuild - uses: github/codeql-action/autobuild@4dd16135b69a43b6c8efb853346f8437d92d3c93 # v3.26.6 + uses: github/codeql-action/autobuild@e2b3eafc8d227b0241d48be5f425d47c2d750a13 # v3.26.10 - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@4dd16135b69a43b6c8efb853346f8437d92d3c93 # v3.26.6 + uses: github/codeql-action/analyze@e2b3eafc8d227b0241d48be5f425d47c2d750a13 # v3.26.10 diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index 7e0ed7dc36..b5fbc7c946 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -45,6 +45,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. 
- name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@4dd16135b69a43b6c8efb853346f8437d92d3c93 # tag=v3.26.6 + uses: github/codeql-action/upload-sarif@e2b3eafc8d227b0241d48be5f425d47c2d750a13 # tag=v3.26.10 with: sarif_file: results.sarif From 2a69565cef4a229923f6f270e2e137158e802ae2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 1 Oct 2024 23:46:19 +0000 Subject: [PATCH 094/137] Bump bufbuild/buf-setup-action from 1.39.0 to 1.43.0 Bumps [bufbuild/buf-setup-action](https://github.com/bufbuild/buf-setup-action) from 1.39.0 to 1.43.0. - [Release notes](https://github.com/bufbuild/buf-setup-action/releases) - [Commits](https://github.com/bufbuild/buf-setup-action/compare/54abbed4fe8d8d45173eca4798b0c39a53a7b658...62ee92603c244ad0da98bab36a834a999a5329e6) --- updated-dependencies: - dependency-name: bufbuild/buf-setup-action dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- .github/workflows/buf-lint.yml | 2 +- .github/workflows/buf.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/buf-lint.yml b/.github/workflows/buf-lint.yml index 3f6cf76e16..8f932b759b 100644 --- a/.github/workflows/buf-lint.yml +++ b/.github/workflows/buf-lint.yml @@ -13,7 +13,7 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 - - uses: bufbuild/buf-setup-action@54abbed4fe8d8d45173eca4798b0c39a53a7b658 # v1.39.0 + - uses: bufbuild/buf-setup-action@62ee92603c244ad0da98bab36a834a999a5329e6 # v1.43.0 with: github_token: ${{ secrets.GITHUB_TOKEN }} - uses: bufbuild/buf-lint-action@06f9dd823d873146471cfaaf108a993fe00e5325 # v1.1.1 diff --git a/.github/workflows/buf.yml b/.github/workflows/buf.yml index 632d38cb00..1b189926fb 100644 --- a/.github/workflows/buf.yml +++ b/.github/workflows/buf.yml @@ -13,7 +13,7 @@ jobs: if: github.repository_owner == 
'prometheus' steps: - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 - - uses: bufbuild/buf-setup-action@54abbed4fe8d8d45173eca4798b0c39a53a7b658 # v1.39.0 + - uses: bufbuild/buf-setup-action@62ee92603c244ad0da98bab36a834a999a5329e6 # v1.43.0 with: github_token: ${{ secrets.GITHUB_TOKEN }} - uses: bufbuild/buf-lint-action@06f9dd823d873146471cfaaf108a993fe00e5325 # v1.1.1 From 6d4de289188d844bd2ffb4e106c3c3a19e2ce708 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 2 Oct 2024 04:03:42 +0000 Subject: [PATCH 095/137] Bump @mantine/notifications from 7.12.2 to 7.13.1 in /web/ui Bumps [@mantine/notifications](https://github.com/mantinedev/mantine/tree/HEAD/packages/@mantine/notifications) from 7.12.2 to 7.13.1. - [Release notes](https://github.com/mantinedev/mantine/releases) - [Changelog](https://github.com/mantinedev/mantine/blob/master/CHANGELOG.md) - [Commits](https://github.com/mantinedev/mantine/commits/7.13.1/packages/@mantine/notifications) --- updated-dependencies: - dependency-name: "@mantine/notifications" dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- web/ui/mantine-ui/package.json | 2 +- web/ui/package-lock.json | 22 ++++++++++------------ 2 files changed, 11 insertions(+), 13 deletions(-) diff --git a/web/ui/mantine-ui/package.json b/web/ui/mantine-ui/package.json index 374e57174d..92795d17a7 100644 --- a/web/ui/mantine-ui/package.json +++ b/web/ui/mantine-ui/package.json @@ -24,7 +24,7 @@ "@mantine/core": "^7.11.2", "@mantine/dates": "^7.13.1", "@mantine/hooks": "^7.11.2", - "@mantine/notifications": "^7.11.2", + "@mantine/notifications": "^7.13.1", "@microsoft/fetch-event-source": "^2.0.1", "@nexucis/fuzzy": "^0.5.1", "@nexucis/kvsearch": "^0.9.1", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index fab7181df2..41f567fe8e 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -38,7 +38,7 @@ "@mantine/core": "^7.11.2", "@mantine/dates": "^7.13.1", "@mantine/hooks": "^7.11.2", - "@mantine/notifications": "^7.11.2", + "@mantine/notifications": "^7.13.1", "@microsoft/fetch-event-source": "^2.0.1", "@nexucis/fuzzy": "^0.5.1", "@nexucis/kvsearch": "^0.9.1", @@ -2229,26 +2229,24 @@ } }, "node_modules/@mantine/notifications": { - "version": "7.12.2", - "resolved": "https://registry.npmjs.org/@mantine/notifications/-/notifications-7.12.2.tgz", - "integrity": "sha512-gTvLHkoAZ42v5bZxibP9A50djp5ndEwumVhHSa7mxQ8oSS23tt3It/6hOqH7M+9kHY0a8s+viMiflUzTByA9qg==", - "license": "MIT", + "version": "7.13.1", + "resolved": "https://registry.npmjs.org/@mantine/notifications/-/notifications-7.13.1.tgz", + "integrity": "sha512-Lc66wRar/nqADoaSlLHntREWbMlDDVs/Sabla2ac/V8jftLOnQpVPMefMpFVGYNJdhT3mG/9bguZV5K7pkjSXQ==", "dependencies": { - "@mantine/store": "7.12.2", + "@mantine/store": "7.13.1", "react-transition-group": "4.4.5" }, "peerDependencies": { - "@mantine/core": "7.12.2", - "@mantine/hooks": "7.12.2", + "@mantine/core": "7.13.1", + "@mantine/hooks": "7.13.1", "react": "^18.2.0", "react-dom": "^18.2.0" } }, "node_modules/@mantine/store": { - "version": 
"7.12.2", - "resolved": "https://registry.npmjs.org/@mantine/store/-/store-7.12.2.tgz", - "integrity": "sha512-NqL31sO/KcAETEWP/CiXrQOQNoE4168vZsxyXacQHGBueVMJa64WIDQtKLHrCnFRMws3vsXF02/OO4bH4XGcMQ==", - "license": "MIT", + "version": "7.13.1", + "resolved": "https://registry.npmjs.org/@mantine/store/-/store-7.13.1.tgz", + "integrity": "sha512-/ZiVU8oFMkzSNrXqAvxb9ZfHWgVg7E8apUEQCzBh9sxgxdVoM9Y1+2YqOoi885hxskmPpkmGP+VGOJnQ6OKJig==", "peerDependencies": { "react": "^18.2.0" } From bcaf1084e63165d1fb969a3fb5e73edb7178afe8 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 2 Oct 2024 04:03:48 +0000 Subject: [PATCH 096/137] Bump @mantine/code-highlight from 7.12.2 to 7.13.1 in /web/ui Bumps [@mantine/code-highlight](https://github.com/mantinedev/mantine/tree/HEAD/packages/@mantine/code-highlight) from 7.12.2 to 7.13.1. - [Release notes](https://github.com/mantinedev/mantine/releases) - [Changelog](https://github.com/mantinedev/mantine/blob/master/CHANGELOG.md) - [Commits](https://github.com/mantinedev/mantine/commits/7.13.1/packages/@mantine/code-highlight) --- updated-dependencies: - dependency-name: "@mantine/code-highlight" dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- web/ui/mantine-ui/package.json | 2 +- web/ui/package-lock.json | 13 ++++++------- 2 files changed, 7 insertions(+), 8 deletions(-) diff --git a/web/ui/mantine-ui/package.json b/web/ui/mantine-ui/package.json index 374e57174d..77fc52c209 100644 --- a/web/ui/mantine-ui/package.json +++ b/web/ui/mantine-ui/package.json @@ -20,7 +20,7 @@ "@floating-ui/dom": "^1.6.7", "@lezer/common": "^1.2.1", "@lezer/highlight": "^1.2.1", - "@mantine/code-highlight": "^7.11.2", + "@mantine/code-highlight": "^7.13.1", "@mantine/core": "^7.11.2", "@mantine/dates": "^7.13.1", "@mantine/hooks": "^7.11.2", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index fab7181df2..9c387bad7f 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -34,7 +34,7 @@ "@floating-ui/dom": "^1.6.7", "@lezer/common": "^1.2.1", "@lezer/highlight": "^1.2.1", - "@mantine/code-highlight": "^7.11.2", + "@mantine/code-highlight": "^7.13.1", "@mantine/core": "^7.11.2", "@mantine/dates": "^7.13.1", "@mantine/hooks": "^7.11.2", @@ -2172,17 +2172,16 @@ } }, "node_modules/@mantine/code-highlight": { - "version": "7.12.2", - "resolved": "https://registry.npmjs.org/@mantine/code-highlight/-/code-highlight-7.12.2.tgz", - "integrity": "sha512-eVVA6ZmtV2qV60qiQW3wvFbs0ryCmzrCJaqU4GV0D+6lGVn8mwbbo36+Jt4Qz/6FrswPD99ALRBlOwHDJe0P8A==", - "license": "MIT", + "version": "7.13.1", + "resolved": "https://registry.npmjs.org/@mantine/code-highlight/-/code-highlight-7.13.1.tgz", + "integrity": "sha512-7Iz6ymlTFf8hRu7OBUDOaevr2cnOPtktnDJ+9KtYibA7iZoaMxtv7CfarhfcYghDdPK9HOIQpAJkbzD5NgwjYQ==", "dependencies": { "clsx": "^2.1.1", "highlight.js": "^11.9.0" }, "peerDependencies": { - "@mantine/core": "7.12.2", - "@mantine/hooks": "7.12.2", + "@mantine/core": "7.13.1", + "@mantine/hooks": "7.13.1", "react": "^18.2.0", "react-dom": "^18.2.0" } From f24e2109ad4edf20824ab4f80f4f514436189ad2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" 
<49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 2 Oct 2024 04:09:11 +0000 Subject: [PATCH 097/137] Bump @uiw/react-codemirror from 4.23.1 to 4.23.3 in /web/ui Bumps [@uiw/react-codemirror](https://github.com/uiwjs/react-codemirror) from 4.23.1 to 4.23.3. - [Release notes](https://github.com/uiwjs/react-codemirror/releases) - [Commits](https://github.com/uiwjs/react-codemirror/compare/v4.23.1...v4.23.3) --- updated-dependencies: - dependency-name: "@uiw/react-codemirror" dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- web/ui/mantine-ui/package.json | 2 +- web/ui/package-lock.json | 18 ++++++++---------- 2 files changed, 9 insertions(+), 11 deletions(-) diff --git a/web/ui/mantine-ui/package.json b/web/ui/mantine-ui/package.json index 0e57908ecd..8681a68d5b 100644 --- a/web/ui/mantine-ui/package.json +++ b/web/ui/mantine-ui/package.json @@ -36,7 +36,7 @@ "@testing-library/react": "^16.0.1", "@types/lodash": "^4.17.9", "@types/sanitize-html": "^2.13.0", - "@uiw/react-codemirror": "^4.23.1", + "@uiw/react-codemirror": "^4.23.3", "clsx": "^2.1.1", "dayjs": "^1.11.10", "lodash": "^4.17.21", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index 5c9265a2a2..bf024bb32e 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -50,7 +50,7 @@ "@testing-library/react": "^16.0.1", "@types/lodash": "^4.17.9", "@types/sanitize-html": "^2.13.0", - "@uiw/react-codemirror": "^4.23.1", + "@uiw/react-codemirror": "^4.23.3", "clsx": "^2.1.1", "dayjs": "^1.11.10", "lodash": "^4.17.21", @@ -3299,10 +3299,9 @@ } }, "node_modules/@uiw/codemirror-extensions-basic-setup": { - "version": "4.23.1", - "resolved": "https://registry.npmjs.org/@uiw/codemirror-extensions-basic-setup/-/codemirror-extensions-basic-setup-4.23.1.tgz", - "integrity": "sha512-l/1iBZt3Ao9ElUvUvA0CI8bLcGw0kgV0976l1u3psYMfKYJl5TwSHn6JOeSt/iCq/13exp1f7u+zFMRwtzeinw==", - "license": "MIT", + "version": "4.23.3", + 
"resolved": "https://registry.npmjs.org/@uiw/codemirror-extensions-basic-setup/-/codemirror-extensions-basic-setup-4.23.3.tgz", + "integrity": "sha512-nEMjgbCyeLx+UQgOGAAoUWYFE34z5TlyaKNszuig/BddYFDb0WKcgmC37bDFxR2dZssf3K/lwGWLpXnGKXePbA==", "dependencies": { "@codemirror/autocomplete": "^6.0.0", "@codemirror/commands": "^6.0.0", @@ -3326,16 +3325,15 @@ } }, "node_modules/@uiw/react-codemirror": { - "version": "4.23.1", - "resolved": "https://registry.npmjs.org/@uiw/react-codemirror/-/react-codemirror-4.23.1.tgz", - "integrity": "sha512-OUrBY/7gvmiolgP4m9UlsGAzNce9YEzmDvPPAc+g27q+BZEJYeWQCzqtjtXfL7OkwQcZ0Aea2DuUUZRUTTIyxg==", - "license": "MIT", + "version": "4.23.3", + "resolved": "https://registry.npmjs.org/@uiw/react-codemirror/-/react-codemirror-4.23.3.tgz", + "integrity": "sha512-TBBLUbeqXmfQSfO+f3rPNOAb+QXbSm7KPB64FHQWLGg2WJNbpOhjLOWMyL+C4ZP3aSCNc2Y5aftEK1vp3wCKTA==", "dependencies": { "@babel/runtime": "^7.18.6", "@codemirror/commands": "^6.1.0", "@codemirror/state": "^6.1.1", "@codemirror/theme-one-dark": "^6.0.0", - "@uiw/codemirror-extensions-basic-setup": "4.23.1", + "@uiw/codemirror-extensions-basic-setup": "4.23.3", "codemirror": "^6.0.0" }, "funding": { From 1da185244e64b3c68b63f9e3cb412d9a87aa7233 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 2 Oct 2024 04:09:28 +0000 Subject: [PATCH 098/137] Bump @tanstack/react-query from 5.53.2 to 5.59.0 in /web/ui Bumps [@tanstack/react-query](https://github.com/TanStack/query/tree/HEAD/packages/react-query) from 5.53.2 to 5.59.0. - [Release notes](https://github.com/TanStack/query/releases) - [Commits](https://github.com/TanStack/query/commits/v5.59.0/packages/react-query) --- updated-dependencies: - dependency-name: "@tanstack/react-query" dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- web/ui/mantine-ui/package.json | 2 +- web/ui/package-lock.json | 18 ++++++++---------- 2 files changed, 9 insertions(+), 11 deletions(-) diff --git a/web/ui/mantine-ui/package.json b/web/ui/mantine-ui/package.json index 0e57908ecd..00265c28ac 100644 --- a/web/ui/mantine-ui/package.json +++ b/web/ui/mantine-ui/package.json @@ -31,7 +31,7 @@ "@prometheus-io/codemirror-promql": "0.300.0-beta.0", "@reduxjs/toolkit": "^2.2.1", "@tabler/icons-react": "^2.47.0", - "@tanstack/react-query": "^5.22.2", + "@tanstack/react-query": "^5.59.0", "@testing-library/jest-dom": "^6.5.0", "@testing-library/react": "^16.0.1", "@types/lodash": "^4.17.9", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index 5c9265a2a2..5cc184069d 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -45,7 +45,7 @@ "@prometheus-io/codemirror-promql": "0.300.0-beta.0", "@reduxjs/toolkit": "^2.2.1", "@tabler/icons-react": "^2.47.0", - "@tanstack/react-query": "^5.22.2", + "@tanstack/react-query": "^5.59.0", "@testing-library/jest-dom": "^6.5.0", "@testing-library/react": "^16.0.1", "@types/lodash": "^4.17.9", @@ -2705,22 +2705,20 @@ } }, "node_modules/@tanstack/query-core": { - "version": "5.53.2", - "resolved": "https://registry.npmjs.org/@tanstack/query-core/-/query-core-5.53.2.tgz", - "integrity": "sha512-gCsABpRrYfLsmwcQ0JCE5I3LOQ9KYrDDSnseUDP3T7ukV8E7+lhlHDJS4Gegt1TSZCsxKhc1J5A7TkF5ePjDUQ==", - "license": "MIT", + "version": "5.59.0", + "resolved": "https://registry.npmjs.org/@tanstack/query-core/-/query-core-5.59.0.tgz", + "integrity": "sha512-WGD8uIhX6/deH/tkZqPNcRyAhDUqs729bWKoByYHSogcshXfFbppOdTER5+qY7mFvu8KEFJwT0nxr8RfPTVh0Q==", "funding": { "type": "github", "url": "https://github.com/sponsors/tannerlinsley" } }, "node_modules/@tanstack/react-query": { - "version": "5.53.2", - "resolved": "https://registry.npmjs.org/@tanstack/react-query/-/react-query-5.53.2.tgz", - "integrity": 
"sha512-ZxG/rspElkfqg2LElnNtsNgPtiCZ4Wl2XY43bATQqPvNgyrhzbCFzCjDwSQy9fJhSiDVALSlxYS8YOIiToqQmg==", - "license": "MIT", + "version": "5.59.0", + "resolved": "https://registry.npmjs.org/@tanstack/react-query/-/react-query-5.59.0.tgz", + "integrity": "sha512-YDXp3OORbYR+8HNQx+lf4F73NoiCmCcSvZvgxE29OifmQFk0sBlO26NWLHpcNERo92tVk3w+JQ53/vkcRUY1hA==", "dependencies": { - "@tanstack/query-core": "5.53.2" + "@tanstack/query-core": "5.59.0" }, "funding": { "type": "github", From 0e2623910f1a7f091eec4dcf5b65bd2d9e65105b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 2 Oct 2024 04:13:09 +0000 Subject: [PATCH 099/137] Bump jsdom from 25.0.0 to 25.0.1 in /web/ui Bumps [jsdom](https://github.com/jsdom/jsdom) from 25.0.0 to 25.0.1. - [Release notes](https://github.com/jsdom/jsdom/releases) - [Changelog](https://github.com/jsdom/jsdom/blob/main/Changelog.md) - [Commits](https://github.com/jsdom/jsdom/compare/25.0.0...25.0.1) --- updated-dependencies: - dependency-name: jsdom dependency-type: direct:development update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- web/ui/mantine-ui/package.json | 2 +- web/ui/package-lock.json | 103 +++++++++++---------------------- 2 files changed, 34 insertions(+), 71 deletions(-) diff --git a/web/ui/mantine-ui/package.json b/web/ui/mantine-ui/package.json index e8244022b0..7181d7bb31 100644 --- a/web/ui/mantine-ui/package.json +++ b/web/ui/mantine-ui/package.json @@ -63,7 +63,7 @@ "eslint-plugin-react-hooks": "^5.1.0-rc-e56f4ae3-20240830", "eslint-plugin-react-refresh": "^0.4.12", "globals": "^15.9.0", - "jsdom": "^25.0.0", + "jsdom": "^25.0.1", "postcss": "^8.4.35", "postcss-preset-mantine": "^1.17.0", "postcss-simple-vars": "^7.0.1", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index b580a3aa9c..f2765bdb42 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -77,7 +77,7 @@ "eslint-plugin-react-hooks": "^5.1.0-rc-e56f4ae3-20240830", "eslint-plugin-react-refresh": "^0.4.12", "globals": "^15.9.0", - "jsdom": "^25.0.0", + "jsdom": "^25.0.1", "postcss": "^8.4.35", "postcss-preset-mantine": "^1.17.0", "postcss-simple-vars": "^7.0.1", @@ -4206,25 +4206,17 @@ } }, "node_modules/cssstyle": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/cssstyle/-/cssstyle-4.0.1.tgz", - "integrity": "sha512-8ZYiJ3A/3OkDd093CBT/0UKDWry7ak4BdPTFP2+QEP7cmhouyq/Up709ASSj2cK02BbZiMgk7kYjZNS4QP5qrQ==", + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/cssstyle/-/cssstyle-4.1.0.tgz", + "integrity": "sha512-h66W1URKpBS5YMI/V8PyXvTMFT8SupJ1IzoIV8IeBC/ji8WVmrO8dGlTi+2dh6whmdk6BiKJLD/ZBkhWbcg6nA==", "dev": true, - "license": "MIT", "dependencies": { - "rrweb-cssom": "^0.6.0" + "rrweb-cssom": "^0.7.1" }, "engines": { "node": ">=18" } }, - "node_modules/cssstyle/node_modules/rrweb-cssom": { - "version": "0.6.0", - "resolved": "https://registry.npmjs.org/rrweb-cssom/-/rrweb-cssom-0.6.0.tgz", - "integrity": "sha512-APM0Gt1KoXBz0iIkkdB/kfvGOwC4UuJFeG/c+yV7wSc7q96cG/kJ0HiYCnzivD9SB53cLV1MlHFNfOuPaadYSw==", - "dev": true, - 
"license": "MIT" - }, "node_modules/csstype": { "version": "3.1.3", "resolved": "https://registry.npmjs.org/csstype/-/csstype-3.1.3.tgz", @@ -6848,13 +6840,12 @@ } }, "node_modules/jsdom": { - "version": "25.0.0", - "resolved": "https://registry.npmjs.org/jsdom/-/jsdom-25.0.0.tgz", - "integrity": "sha512-OhoFVT59T7aEq75TVw9xxEfkXgacpqAhQaYgP9y/fDqWQCMB/b1H66RfmPm/MaeaAIU9nDwMOVTlPN51+ao6CQ==", + "version": "25.0.1", + "resolved": "https://registry.npmjs.org/jsdom/-/jsdom-25.0.1.tgz", + "integrity": "sha512-8i7LzZj7BF8uplX+ZyOlIz86V6TAsSs+np6m1kpW9u0JWi4z/1t+FzcK1aek+ybTnAC4KhBL4uXCNT0wcUIeCw==", "dev": true, - "license": "MIT", "dependencies": { - "cssstyle": "^4.0.1", + "cssstyle": "^4.1.0", "data-urls": "^5.0.0", "decimal.js": "^10.4.3", "form-data": "^4.0.0", @@ -6867,7 +6858,7 @@ "rrweb-cssom": "^0.7.1", "saxes": "^6.0.0", "symbol-tree": "^3.2.4", - "tough-cookie": "^4.1.4", + "tough-cookie": "^5.0.0", "w3c-xmlserializer": "^5.0.0", "webidl-conversions": "^7.0.0", "whatwg-encoding": "^3.1.1", @@ -7919,13 +7910,6 @@ "node": ">= 8" } }, - "node_modules/psl": { - "version": "1.9.0", - "resolved": "https://registry.npmjs.org/psl/-/psl-1.9.0.tgz", - "integrity": "sha512-E/ZsdU4HLs/68gYzgGTkMicWTLPdAftJLfJFlLUAAKZGkStNU72sZjT66SnMDVOfOWY/YAoiD7Jxa9iHvngcag==", - "dev": true, - "license": "MIT" - }, "node_modules/punycode": { "version": "2.3.1", "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.1.tgz", @@ -7954,13 +7938,6 @@ "license": "MIT", "peer": true }, - "node_modules/querystringify": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/querystringify/-/querystringify-2.2.0.tgz", - "integrity": "sha512-FIqgj2EUvTa7R50u0rGsyTftzjYmv/a3hO345bZNrqabNqjtgiDMgmo4mkUjd+nzU5oF3dClKqFIPUKybUyqoQ==", - "dev": true, - "license": "MIT" - }, "node_modules/queue-microtask": { "version": "1.2.3", "resolved": "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz", @@ -8247,13 +8224,6 @@ "node": ">=0.10.0" } }, - 
"node_modules/requires-port": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/requires-port/-/requires-port-1.0.0.tgz", - "integrity": "sha512-KigOCHcocU3XODJxsu8i/j8T9tzT4adHiecwORRQ0ZZFcp7ahwXuRU1m+yuO90C5ZUyGeGfocHDI14M3L3yDAQ==", - "dev": true, - "license": "MIT" - }, "node_modules/reselect": { "version": "5.1.1", "resolved": "https://registry.npmjs.org/reselect/-/reselect-5.1.1.tgz", @@ -8857,6 +8827,24 @@ "node": ">=14.0.0" } }, + "node_modules/tldts": { + "version": "6.1.48", + "resolved": "https://registry.npmjs.org/tldts/-/tldts-6.1.48.tgz", + "integrity": "sha512-SPbnh1zaSzi/OsmHb1vrPNnYuwJbdWjwo5TbBYYMlTtH3/1DSb41t8bcSxkwDmmbG2q6VLPVvQc7Yf23T+1EEw==", + "dev": true, + "dependencies": { + "tldts-core": "^6.1.48" + }, + "bin": { + "tldts": "bin/cli.js" + } + }, + "node_modules/tldts-core": { + "version": "6.1.48", + "resolved": "https://registry.npmjs.org/tldts-core/-/tldts-core-6.1.48.tgz", + "integrity": "sha512-3gD9iKn/n2UuFH1uilBviK9gvTNT6iYwdqrj1Vr5mh8FuelvpRNaYVH4pNYqUgOGU4aAdL9X35eLuuj0gRsx+A==", + "dev": true + }, "node_modules/tmpl": { "version": "1.0.5", "resolved": "https://registry.npmjs.org/tmpl/-/tmpl-1.0.5.tgz", @@ -8889,19 +8877,15 @@ } }, "node_modules/tough-cookie": { - "version": "4.1.4", - "resolved": "https://registry.npmjs.org/tough-cookie/-/tough-cookie-4.1.4.tgz", - "integrity": "sha512-Loo5UUvLD9ScZ6jh8beX1T6sO1w2/MpCRpEP7V280GKMVUQ0Jzar2U3UJPsrdbziLEMMhu3Ujnq//rhiFuIeag==", + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/tough-cookie/-/tough-cookie-5.0.0.tgz", + "integrity": "sha512-FRKsF7cz96xIIeMZ82ehjC3xW2E+O2+v11udrDYewUbszngYhsGa8z6YUMMzO9QJZzzyd0nGGXnML/TReX6W8Q==", "dev": true, - "license": "BSD-3-Clause", "dependencies": { - "psl": "^1.1.33", - "punycode": "^2.1.1", - "universalify": "^0.2.0", - "url-parse": "^1.5.3" + "tldts": "^6.1.32" }, "engines": { - "node": ">=6" + "node": ">=16" } }, "node_modules/tr46": { @@ -9041,16 +9025,6 @@ "dev": true, "license": "MIT" }, - 
"node_modules/universalify": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/universalify/-/universalify-0.2.0.tgz", - "integrity": "sha512-CJ1QgKmNg3CwvAv/kOFmtnEN05f0D/cn9QntgNOQlQF9dgvVTHj3t+8JPdjqawCHk7V/KA+fbUqzZ9XWhcqPUg==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 4.0.0" - } - }, "node_modules/update-browserslist-db": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.1.0.tgz", @@ -9111,17 +9085,6 @@ "punycode": "^2.1.0" } }, - "node_modules/url-parse": { - "version": "1.5.10", - "resolved": "https://registry.npmjs.org/url-parse/-/url-parse-1.5.10.tgz", - "integrity": "sha512-WypcfiRhfeUP9vvF0j6rw0J3hrWrw6iZv3+22h6iRMJ/8z1Tj6XfLP4DsUix5MhMPnXpiHDoKyoZ/bdCkwBCiQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "querystringify": "^2.1.1", - "requires-port": "^1.0.0" - } - }, "node_modules/use-callback-ref": { "version": "1.3.2", "resolved": "https://registry.npmjs.org/use-callback-ref/-/use-callback-ref-1.3.2.tgz", From 1e18e5c6613f3684994873b19c6ef41f1c616877 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 2 Oct 2024 04:13:57 +0000 Subject: [PATCH 100/137] Bump vite from 5.4.2 to 5.4.8 in /web/ui Bumps [vite](https://github.com/vitejs/vite/tree/HEAD/packages/vite) from 5.4.2 to 5.4.8. - [Release notes](https://github.com/vitejs/vite/releases) - [Changelog](https://github.com/vitejs/vite/blob/v5.4.8/packages/vite/CHANGELOG.md) - [Commits](https://github.com/vitejs/vite/commits/v5.4.8/packages/vite) --- updated-dependencies: - dependency-name: vite dependency-type: direct:development update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- web/ui/mantine-ui/package.json | 2 +- web/ui/package-lock.json | 13 ++++++------- web/ui/package.json | 2 +- 3 files changed, 8 insertions(+), 9 deletions(-) diff --git a/web/ui/mantine-ui/package.json b/web/ui/mantine-ui/package.json index e8244022b0..728edf8b0b 100644 --- a/web/ui/mantine-ui/package.json +++ b/web/ui/mantine-ui/package.json @@ -67,7 +67,7 @@ "postcss": "^8.4.35", "postcss-preset-mantine": "^1.17.0", "postcss-simple-vars": "^7.0.1", - "vite": "^5.1.0", + "vite": "^5.4.8", "vitest": "^2.1.1" } } diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index b580a3aa9c..63f1064ad5 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -19,7 +19,7 @@ "prettier": "^3.3.3", "ts-jest": "^29.2.2", "typescript": "^5.6.2", - "vite": "^5.1.0" + "vite": "^5.4.8" } }, "mantine-ui": { @@ -81,7 +81,7 @@ "postcss": "^8.4.35", "postcss-preset-mantine": "^1.17.0", "postcss-simple-vars": "^7.0.1", - "vite": "^5.1.0", + "vite": "^5.4.8", "vitest": "^2.1.1" } }, @@ -9261,14 +9261,13 @@ } }, "node_modules/vite": { - "version": "5.4.2", - "resolved": "https://registry.npmjs.org/vite/-/vite-5.4.2.tgz", - "integrity": "sha512-dDrQTRHp5C1fTFzcSaMxjk6vdpKvT+2/mIdE07Gw2ykehT49O0z/VHS3zZ8iV/Gh8BJJKHWOe5RjaNrW5xf/GA==", + "version": "5.4.8", + "resolved": "https://registry.npmjs.org/vite/-/vite-5.4.8.tgz", + "integrity": "sha512-FqrItQ4DT1NC4zCUqMB4c4AZORMKIa0m8/URVCZ77OZ/QSNeJ54bU1vrFADbDsuwfIPcgknRkmqakQcgnL4GiQ==", "dev": true, - "license": "MIT", "dependencies": { "esbuild": "^0.21.3", - "postcss": "^8.4.41", + "postcss": "^8.4.43", "rollup": "^4.20.0" }, "bin": { diff --git a/web/ui/package.json b/web/ui/package.json index 639ef70abf..c65e3109d2 100644 --- a/web/ui/package.json +++ b/web/ui/package.json @@ -22,6 +22,6 @@ "prettier": "^3.3.3", "ts-jest": "^29.2.2", "typescript": "^5.6.2", - "vite": "^5.1.0" + "vite": "^5.4.8" } } From fc01573daa405a602530cb778e14d1c7f8007a9e Mon Sep 17 00:00:00 2001 From: Julius 
Volz Date: Wed, 2 Oct 2024 06:30:13 +0200 Subject: [PATCH 101/137] Allow blank issue reports again I frequently find myself in the situation where the standard bug issue template fields are all irrelevant for what I want to report, and then I have to first shoehorn my info into the template somehow, save the issue, edit it, and remove all the unnecessary parts. This demotivates me from filing casual issues, e.g. when I see a CI test fail. We should have a way of still filing custom issues without all the templatey bits. Signed-off-by: Julius Volz --- .github/ISSUE_TEMPLATE/config.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml index f4d17b3596..bb4e2d24c9 100644 --- a/.github/ISSUE_TEMPLATE/config.yml +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -1,4 +1,4 @@ -blank_issues_enabled: false +blank_issues_enabled: true contact_links: - name: Prometheus Community Support url: https://prometheus.io/community/ From 4782b75d114618789fd3992856f27b9d974711cf Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 2 Oct 2024 04:48:12 +0000 Subject: [PATCH 102/137] Bump @codemirror/autocomplete from 6.18.0 to 6.18.1 in /web/ui Bumps [@codemirror/autocomplete](https://github.com/codemirror/autocomplete) from 6.18.0 to 6.18.1. - [Changelog](https://github.com/codemirror/autocomplete/blob/main/CHANGELOG.md) - [Commits](https://github.com/codemirror/autocomplete/compare/6.18.0...6.18.1) --- updated-dependencies: - dependency-name: "@codemirror/autocomplete" dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- web/ui/mantine-ui/package.json | 2 +- web/ui/module/codemirror-promql/package.json | 2 +- web/ui/package-lock.json | 11 +++++------ 3 files changed, 7 insertions(+), 8 deletions(-) diff --git a/web/ui/mantine-ui/package.json b/web/ui/mantine-ui/package.json index 3cca4a2fc1..9ee212a373 100644 --- a/web/ui/mantine-ui/package.json +++ b/web/ui/mantine-ui/package.json @@ -12,7 +12,7 @@ "test": "vitest" }, "dependencies": { - "@codemirror/autocomplete": "^6.18.0", + "@codemirror/autocomplete": "^6.18.1", "@codemirror/language": "^6.10.2", "@codemirror/lint": "^6.8.1", "@codemirror/state": "^6.4.1", diff --git a/web/ui/module/codemirror-promql/package.json b/web/ui/module/codemirror-promql/package.json index cf05c6f643..3e459e83a0 100644 --- a/web/ui/module/codemirror-promql/package.json +++ b/web/ui/module/codemirror-promql/package.json @@ -33,7 +33,7 @@ "lru-cache": "^11.0.1" }, "devDependencies": { - "@codemirror/autocomplete": "^6.17.0", + "@codemirror/autocomplete": "^6.18.1", "@codemirror/language": "^6.10.2", "@codemirror/lint": "^6.8.1", "@codemirror/state": "^6.3.3", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index d9d867e524..c2a4860aed 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -26,7 +26,7 @@ "name": "@prometheus-io/mantine-ui", "version": "0.300.0-beta.0", "dependencies": { - "@codemirror/autocomplete": "^6.18.0", + "@codemirror/autocomplete": "^6.18.1", "@codemirror/language": "^6.10.2", "@codemirror/lint": "^6.8.1", "@codemirror/state": "^6.4.1", @@ -167,7 +167,7 @@ "lru-cache": "^11.0.1" }, "devDependencies": { - "@codemirror/autocomplete": "^6.17.0", + "@codemirror/autocomplete": "^6.18.1", "@codemirror/language": "^6.10.2", "@codemirror/lint": "^6.8.1", "@codemirror/state": "^6.3.3", @@ -913,10 +913,9 @@ "peer": true }, "node_modules/@codemirror/autocomplete": { - "version": "6.18.0", - "resolved": 
"https://registry.npmjs.org/@codemirror/autocomplete/-/autocomplete-6.18.0.tgz", - "integrity": "sha512-5DbOvBbY4qW5l57cjDsmmpDh3/TeK1vXfTHa+BUMrRzdWdcxKZ4U4V7vQaTtOpApNU4kLS4FQ6cINtLg245LXA==", - "license": "MIT", + "version": "6.18.1", + "resolved": "https://registry.npmjs.org/@codemirror/autocomplete/-/autocomplete-6.18.1.tgz", + "integrity": "sha512-iWHdj/B1ethnHRTwZj+C1obmmuCzquH29EbcKr0qIjA9NfDeBDJ7vs+WOHsFeLeflE4o+dHfYndJloMKHUkWUA==", "dependencies": { "@codemirror/language": "^6.0.0", "@codemirror/state": "^6.0.0", From ab808f6e646e901a4b7d6cd953fc161945d02d72 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 2 Oct 2024 05:07:01 +0000 Subject: [PATCH 103/137] Bump eslint from 9.9.1 to 9.11.1 in /web/ui Bumps [eslint](https://github.com/eslint/eslint) from 9.9.1 to 9.11.1. - [Release notes](https://github.com/eslint/eslint/releases) - [Changelog](https://github.com/eslint/eslint/blob/main/CHANGELOG.md) - [Commits](https://github.com/eslint/eslint/compare/v9.9.1...v9.11.1) --- updated-dependencies: - dependency-name: eslint dependency-type: direct:development update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- web/ui/mantine-ui/package.json | 2 +- web/ui/package-lock.json | 223 ++++++++++++++++----------------- 2 files changed, 108 insertions(+), 117 deletions(-) diff --git a/web/ui/mantine-ui/package.json b/web/ui/mantine-ui/package.json index 2487d27a2e..8b60cbb374 100644 --- a/web/ui/mantine-ui/package.json +++ b/web/ui/mantine-ui/package.json @@ -59,7 +59,7 @@ "@typescript-eslint/eslint-plugin": "^6.21.0", "@typescript-eslint/parser": "^6.21.0", "@vitejs/plugin-react": "^4.2.1", - "eslint": "^9.9.1", + "eslint": "^9.11.1", "eslint-plugin-react-hooks": "^5.1.0-rc-e56f4ae3-20240830", "eslint-plugin-react-refresh": "^0.4.12", "globals": "^15.9.0", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index 8ca1c2b600..ac3d762835 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -73,7 +73,7 @@ "@typescript-eslint/eslint-plugin": "^6.21.0", "@typescript-eslint/parser": "^6.21.0", "@vitejs/plugin-react": "^4.2.1", - "eslint": "^9.9.1", + "eslint": "^9.11.1", "eslint-plugin-react-hooks": "^5.1.0-rc-e56f4ae3-20240830", "eslint-plugin-react-refresh": "^0.4.12", "globals": "^15.9.0", @@ -85,21 +85,30 @@ "vitest": "^2.1.1" } }, + "mantine-ui/node_modules/@types/estree": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.6.tgz", + "integrity": "sha512-AYnb1nQyY49te+VRAVgmzfcgjYS91mY5P0TKUDCLEM+gNnA+3T6rWITXRLYCpahpqSQbN5cE+gHpnPyXjHWxcw==", + "dev": true + }, "mantine-ui/node_modules/eslint": { - "version": "9.9.1", - "resolved": "https://registry.npmjs.org/eslint/-/eslint-9.9.1.tgz", - "integrity": "sha512-dHvhrbfr4xFQ9/dq+jcVneZMyRYLjggWjk6RVsIiHsP8Rz6yZ8LvZ//iU4TrZF+SXWG+JkNF2OyiZRvzgRDqMg==", + "version": "9.11.1", + "resolved": "https://registry.npmjs.org/eslint/-/eslint-9.11.1.tgz", + "integrity": "sha512-MobhYKIoAO1s1e4VUrgx1l1Sk2JBR/Gqjjgw8+mfgoLE2xwsHur4gdfTxyTgShrhvdVFTaJSgMiQBl1jv/AWxg==", "dev": true, - "license": "MIT", "dependencies": { 
"@eslint-community/eslint-utils": "^4.2.0", "@eslint-community/regexpp": "^4.11.0", "@eslint/config-array": "^0.18.0", + "@eslint/core": "^0.6.0", "@eslint/eslintrc": "^3.1.0", - "@eslint/js": "9.9.1", + "@eslint/js": "9.11.1", + "@eslint/plugin-kit": "^0.2.0", "@humanwhocodes/module-importer": "^1.0.1", "@humanwhocodes/retry": "^0.3.0", "@nodelib/fs.walk": "^1.2.8", + "@types/estree": "^1.0.6", + "@types/json-schema": "^7.0.15", "ajv": "^6.12.4", "chalk": "^4.0.0", "cross-spawn": "^7.0.2", @@ -119,7 +128,6 @@ "is-glob": "^4.0.0", "is-path-inside": "^3.0.3", "json-stable-stringify-without-jsonify": "^1.0.1", - "levn": "^0.4.1", "lodash.merge": "^4.6.2", "minimatch": "^3.1.2", "natural-compare": "^1.4.0", @@ -145,15 +153,6 @@ } } }, - "mantine-ui/node_modules/eslint/node_modules/@eslint/js": { - "version": "9.9.1", - "resolved": "https://registry.npmjs.org/@eslint/js/-/js-9.9.1.tgz", - "integrity": "sha512-xIDQRsfg5hNBqHz04H1R3scSVwmI+KUbqjsQKHKQ1DAUSaUjYPReZZmS/5PNiKu1fUvzDd6H7DEDKACSEhu+TQ==", - "dev": true, - "engines": { - "node": "^18.18.0 || ^20.9.0 || >=21.1.0" - } - }, "mantine-ui/node_modules/globals": { "version": "15.9.0", "resolved": "https://registry.npmjs.org/globals/-/globals-15.9.0.tgz", @@ -1460,7 +1459,6 @@ "resolved": "https://registry.npmjs.org/@eslint/config-array/-/config-array-0.18.0.tgz", "integrity": "sha512-fTxvnS1sRMu3+JjXwJG0j/i4RT9u4qJ+lqS/yCGap4lH4zZGzQ7tu+xZqQmcMZq5OBZDL4QRxQzRjkWcGt8IVw==", "dev": true, - "license": "Apache-2.0", "dependencies": { "@eslint/object-schema": "^2.1.4", "debug": "^4.3.1", @@ -1470,6 +1468,15 @@ "node": "^18.18.0 || ^20.9.0 || >=21.1.0" } }, + "node_modules/@eslint/core": { + "version": "0.6.0", + "resolved": "https://registry.npmjs.org/@eslint/core/-/core-0.6.0.tgz", + "integrity": "sha512-8I2Q8ykA4J0x0o7cg67FPVnehcqWTBehu/lmY+bolPFHGjh49YzGBMXTvpqVgEbBdvNCSxj6iFgiIyHzf03lzg==", + "dev": true, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + } + }, "node_modules/@eslint/eslintrc": { "version": 
"3.1.0", "resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-3.1.0.tgz", @@ -1508,7 +1515,18 @@ "resolved": "https://registry.npmjs.org/@eslint/object-schema/-/object-schema-2.1.4.tgz", "integrity": "sha512-BsWiH1yFGjXXS2yvrf5LyuoSIIbPrGUWob917o+BTKuZ7qJdxX8aJLRxs1fS9n6r7vESrq1OUqb68dANcFXuQQ==", "dev": true, - "license": "Apache-2.0", + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + } + }, + "node_modules/@eslint/plugin-kit": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/@eslint/plugin-kit/-/plugin-kit-0.2.0.tgz", + "integrity": "sha512-vH9PiIMMwvhCx31Af3HiGzsVNULDbyVkHXwlemn/B0TFj/00ho3y55efXrUZTfQipxoHC5u4xq6zblww1zm1Ig==", + "dev": true, + "dependencies": { + "levn": "^0.4.1" + }, "engines": { "node": "^18.18.0 || ^20.9.0 || >=21.1.0" } @@ -1567,15 +1585,14 @@ "license": "MIT" }, "node_modules/@humanwhocodes/config-array": { - "version": "0.11.14", - "resolved": "https://registry.npmjs.org/@humanwhocodes/config-array/-/config-array-0.11.14.tgz", - "integrity": "sha512-3T8LkOmg45BV5FICb15QQMsyUSWrQ8AygVfC7ZG32zOalnqrilm018ZVCw0eapXux8FtA33q8PSRSstjee3jSg==", + "version": "0.13.0", + "resolved": "https://registry.npmjs.org/@humanwhocodes/config-array/-/config-array-0.13.0.tgz", + "integrity": "sha512-DZLEEqFWQFiyK6h5YIeynKx7JlvCYWL0cImfSRXZ9l4Sg2efkFGTuFf6vzXjK1cq6IYkU+Eg/JizXw+TD2vRNw==", "deprecated": "Use @eslint/config-array instead", "dev": true, - "license": "Apache-2.0", "peer": true, "dependencies": { - "@humanwhocodes/object-schema": "^2.0.2", + "@humanwhocodes/object-schema": "^2.0.3", "debug": "^4.3.1", "minimatch": "^3.0.5" }, @@ -1603,7 +1620,6 @@ "integrity": "sha512-93zYdMES/c1D69yZiKDBj0V24vqNzB/koF26KPaagAfd3P/4gUlh3Dys5ogAK+Exi9QyzlD8x/08Zt7wIKcDcA==", "deprecated": "Use @eslint/object-schema instead", "dev": true, - "license": "BSD-3-Clause", "peer": true }, "node_modules/@humanwhocodes/retry": { @@ -1611,7 +1627,6 @@ "resolved": "https://registry.npmjs.org/@humanwhocodes/retry/-/retry-0.3.0.tgz", 
"integrity": "sha512-d2CGZR2o7fS6sWB7DG/3a95bGKQyHMACZ5aW8qGkkqQpUoZV6C0X7Pc7l4ZNMZkfNBf4VWNe9E1jRsf0G146Ew==", "dev": true, - "license": "Apache-2.0", "engines": { "node": ">=18.18" }, @@ -2964,8 +2979,7 @@ "version": "7.0.15", "resolved": "https://registry.npmjs.org/@types/json-schema/-/json-schema-7.0.15.tgz", "integrity": "sha512-5+fP8P8MFNC+AyZCDxrB2pkZFPGzqQWUzpSeuuVLvm8VMcorNYavBqoFcxK8bQz4Qsbn4oUEEem4wDLfcysGHA==", - "dev": true, - "license": "MIT" + "dev": true }, "node_modules/@types/lodash": { "version": "4.17.9", @@ -3029,8 +3043,7 @@ "version": "7.5.8", "resolved": "https://registry.npmjs.org/@types/semver/-/semver-7.5.8.tgz", "integrity": "sha512-I8EUhyrgfLrcTkzV3TSsGyl1tSuPrEDzr0yd5m90UgNxQkyDXULk3b6MlQqTCpZpNtWe1K0hzclnZkTcLBe2UQ==", - "dev": true, - "license": "MIT" + "dev": true }, "node_modules/@types/stack-utils": { "version": "2.0.3", @@ -3098,6 +3111,58 @@ } } }, + "node_modules/@typescript-eslint/eslint-plugin/node_modules/@typescript-eslint/type-utils": { + "version": "6.21.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/type-utils/-/type-utils-6.21.0.tgz", + "integrity": "sha512-rZQI7wHfao8qMX3Rd3xqeYSMCL3SoiSQLBATSiVKARdFGCYSRvmViieZjqc58jKgs8Y8i9YvVVhRbHSTA4VBag==", + "dev": true, + "dependencies": { + "@typescript-eslint/typescript-estree": "6.21.0", + "@typescript-eslint/utils": "6.21.0", + "debug": "^4.3.4", + "ts-api-utils": "^1.0.1" + }, + "engines": { + "node": "^16.0.0 || >=18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^7.0.0 || ^8.0.0" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/@typescript-eslint/eslint-plugin/node_modules/@typescript-eslint/utils": { + "version": "6.21.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-6.21.0.tgz", + "integrity": 
"sha512-NfWVaC8HP9T8cbKQxHcsJBY5YE1O33+jpMwN45qzWWaPDZgLIbo12toGMWnmhvCpd3sIxkpDw3Wv1B3dYrbDQQ==", + "dev": true, + "dependencies": { + "@eslint-community/eslint-utils": "^4.4.0", + "@types/json-schema": "^7.0.12", + "@types/semver": "^7.5.0", + "@typescript-eslint/scope-manager": "6.21.0", + "@typescript-eslint/types": "6.21.0", + "@typescript-eslint/typescript-estree": "6.21.0", + "semver": "^7.5.4" + }, + "engines": { + "node": "^16.0.0 || >=18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^7.0.0 || ^8.0.0" + } + }, "node_modules/@typescript-eslint/parser": { "version": "6.21.0", "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-6.21.0.tgz", @@ -3145,34 +3210,6 @@ "url": "https://opencollective.com/typescript-eslint" } }, - "node_modules/@typescript-eslint/type-utils": { - "version": "6.21.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/type-utils/-/type-utils-6.21.0.tgz", - "integrity": "sha512-rZQI7wHfao8qMX3Rd3xqeYSMCL3SoiSQLBATSiVKARdFGCYSRvmViieZjqc58jKgs8Y8i9YvVVhRbHSTA4VBag==", - "dev": true, - "license": "MIT", - "dependencies": { - "@typescript-eslint/typescript-estree": "6.21.0", - "@typescript-eslint/utils": "6.21.0", - "debug": "^4.3.4", - "ts-api-utils": "^1.0.1" - }, - "engines": { - "node": "^16.0.0 || >=18.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - }, - "peerDependencies": { - "eslint": "^7.0.0 || ^8.0.0" - }, - "peerDependenciesMeta": { - "typescript": { - "optional": true - } - } - }, "node_modules/@typescript-eslint/types": { "version": "6.21.0", "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-6.21.0.tgz", @@ -3242,32 +3279,6 @@ "url": "https://github.com/sponsors/isaacs" } }, - "node_modules/@typescript-eslint/utils": { - "version": "6.21.0", - "resolved": 
"https://registry.npmjs.org/@typescript-eslint/utils/-/utils-6.21.0.tgz", - "integrity": "sha512-NfWVaC8HP9T8cbKQxHcsJBY5YE1O33+jpMwN45qzWWaPDZgLIbo12toGMWnmhvCpd3sIxkpDw3Wv1B3dYrbDQQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "@eslint-community/eslint-utils": "^4.4.0", - "@types/json-schema": "^7.0.12", - "@types/semver": "^7.5.0", - "@typescript-eslint/scope-manager": "6.21.0", - "@typescript-eslint/types": "6.21.0", - "@typescript-eslint/typescript-estree": "6.21.0", - "semver": "^7.5.4" - }, - "engines": { - "node": "^16.0.0 || >=18.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - }, - "peerDependencies": { - "eslint": "^7.0.0 || ^8.0.0" - } - }, "node_modules/@typescript-eslint/visitor-keys": { "version": "6.21.0", "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-6.21.0.tgz", @@ -3355,7 +3366,6 @@ "resolved": "https://registry.npmjs.org/@ungap/structured-clone/-/structured-clone-1.2.0.tgz", "integrity": "sha512-zuVdFrMJiuCDQUMCzQaD6KL28MjnqqN8XnAqiEq9PNm/hCPTSGfrXCOfwj1ow4LFb/tNymJPwsNbVePc1xFqrQ==", "dev": true, - "license": "ISC", "peer": true }, "node_modules/@vitejs/plugin-react": { @@ -4372,7 +4382,6 @@ "resolved": "https://registry.npmjs.org/doctrine/-/doctrine-3.0.0.tgz", "integrity": "sha512-yS+Q5i3hBf7GBkd4KG8a7eBNNWNGLTaEwwYWUijIYM7zrlYDM0BFXHjjPWlWZ1Rg7UaddZeIDmi9jF3HmqiQ2w==", "dev": true, - "license": "Apache-2.0", "peer": true, "dependencies": { "esutils": "^2.0.2" @@ -4583,18 +4592,17 @@ } }, "node_modules/eslint": { - "version": "8.57.0", - "resolved": "https://registry.npmjs.org/eslint/-/eslint-8.57.0.tgz", - "integrity": "sha512-dZ6+mexnaTIbSBZWgou51U6OmzIhYM2VcNdtiTtI7qPNZm35Akpr0f6vtw3w1Kmn5PYo+tZVfh13WrhpS6oLqQ==", + "version": "8.57.1", + "resolved": "https://registry.npmjs.org/eslint/-/eslint-8.57.1.tgz", + "integrity": "sha512-ypowyDxpVSYpkXr9WPv2PAZCtNip1Mv5KTW0SCurXv/9iOpcrH9PaqUElksqEB6pChqHGDRCFTyrZlGhnLNGiA==", 
"dev": true, - "license": "MIT", "peer": true, "dependencies": { "@eslint-community/eslint-utils": "^4.2.0", "@eslint-community/regexpp": "^4.6.1", "@eslint/eslintrc": "^2.1.4", - "@eslint/js": "8.57.0", - "@humanwhocodes/config-array": "^0.11.14", + "@eslint/js": "8.57.1", + "@humanwhocodes/config-array": "^0.13.0", "@humanwhocodes/module-importer": "^1.0.1", "@nodelib/fs.walk": "^1.2.8", "@ungap/structured-clone": "^1.2.0", @@ -4706,11 +4714,10 @@ } }, "node_modules/eslint-scope": { - "version": "8.0.2", - "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-8.0.2.tgz", - "integrity": "sha512-6E4xmrTw5wtxnLA5wYL3WDfhZ/1bUBGOXV0zQvVRDOtrR8D0p6W7fs3JweNYhwRYeGvd/1CKX2se0/2s7Q/nJA==", + "version": "8.1.0", + "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-8.1.0.tgz", + "integrity": "sha512-14dSvlhaVhKKsa9Fx1l8A17s7ah7Ef7wCakJ10LYk6+GYmP9yDti2oq2SEwcyndt6knfcZyhyxwY3i9yL78EQw==", "dev": true, - "license": "BSD-2-Clause", "dependencies": { "esrecurse": "^4.3.0", "estraverse": "^5.2.0" @@ -4740,7 +4747,6 @@ "resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-2.1.4.tgz", "integrity": "sha512-269Z39MS6wVJtsoUl10L60WdkhJVdPG24Q4eZTH3nnF6lpvSShEK3wQjDX9JRWAUPvPh7COouPpU9IrqaZFvtQ==", "dev": true, - "license": "MIT", "peer": true, "dependencies": { "ajv": "^6.12.4", @@ -4761,11 +4767,10 @@ } }, "node_modules/eslint/node_modules/@eslint/js": { - "version": "8.57.0", - "resolved": "https://registry.npmjs.org/@eslint/js/-/js-8.57.0.tgz", - "integrity": "sha512-Ys+3g2TaW7gADOJzPt83SJtCDhMjndcDMFVQ/Tj9iA1BfJzFKD9mAUXT3OenpuPHbI6P/myECxRJrofUsDx/5g==", + "version": "8.57.1", + "resolved": "https://registry.npmjs.org/@eslint/js/-/js-8.57.1.tgz", + "integrity": "sha512-d9zaMRSTIKDLhctzH12MtXvJKSSUhaHcjV+2Z+GK+EEY7XKpP5yR4x+N3TAcHTcu963nIr+TMcCb4DBCYX1z6Q==", "dev": true, - "license": "MIT", "peer": true, "engines": { "node": "^12.22.0 || ^14.17.0 || >=16.0.0" @@ -4776,7 +4781,6 @@ "resolved": 
"https://registry.npmjs.org/eslint-scope/-/eslint-scope-7.2.2.tgz", "integrity": "sha512-dOt21O7lTMhDM+X9mB4GX+DZrZtCUJPL/wlcTqxyrx5IvO0IYtILdtrQGQp+8n5S0gwSVmOf9NQrjMOgfQZlIg==", "dev": true, - "license": "BSD-2-Clause", "peer": true, "dependencies": { "esrecurse": "^4.3.0", @@ -4794,7 +4798,6 @@ "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-3.4.3.tgz", "integrity": "sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag==", "dev": true, - "license": "Apache-2.0", "peer": true, "engines": { "node": "^12.22.0 || ^14.17.0 || >=16.0.0" @@ -4808,7 +4811,6 @@ "resolved": "https://registry.npmjs.org/espree/-/espree-9.6.1.tgz", "integrity": "sha512-oruZaFkjorTpF32kDSI5/75ViwGeZginGGy2NoOSg3Q9bnwlnmDm4HLnkl0RE3n+njDXR037aY1+x58Z/zFdwQ==", "dev": true, - "license": "BSD-2-Clause", "peer": true, "dependencies": { "acorn": "^8.9.0", @@ -4827,7 +4829,6 @@ "resolved": "https://registry.npmjs.org/file-entry-cache/-/file-entry-cache-6.0.1.tgz", "integrity": "sha512-7Gps/XWymbLk2QLYK4NzpMOrYjMhdIxXuIvy2QBsLE6ljuodKvdkWs/cpyJJ3CVIVpH0Oi1Hvg1ovbMzLdFBBg==", "dev": true, - "license": "MIT", "peer": true, "dependencies": { "flat-cache": "^3.0.4" @@ -4841,7 +4842,6 @@ "resolved": "https://registry.npmjs.org/flat-cache/-/flat-cache-3.2.0.tgz", "integrity": "sha512-CYcENa+FtcUKLmhhqyctpclsq7QF38pKjZHsGNiSQF5r4FtoKDWabFDl3hzaEQMvT1LHEysw5twgLvpYYb4vbw==", "dev": true, - "license": "MIT", "peer": true, "dependencies": { "flatted": "^3.2.9", @@ -4857,7 +4857,6 @@ "resolved": "https://registry.npmjs.org/globals/-/globals-13.24.0.tgz", "integrity": "sha512-AhO5QUcj8llrbG09iWhPU2B204J1xnPeL8kQmVorSsy+Sjj1sk8gIyh6cUocGmH4L0UuhAJy+hJMRA4mgA4mFQ==", "dev": true, - "license": "MIT", "peer": true, "dependencies": { "type-fest": "^0.20.2" @@ -4874,7 +4873,6 @@ "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.20.2.tgz", "integrity": 
"sha512-Ne+eE4r0/iWnpAxD852z3A+N0Bt5RN//NjJwRd2VFHEmrywxf5vsZlh4R6lixl6B+wz/8d+maTSAkN1FIkI3LQ==", "dev": true, - "license": "(MIT OR CC0-1.0)", "peer": true, "engines": { "node": ">=10" @@ -4934,7 +4932,6 @@ "resolved": "https://registry.npmjs.org/esrecurse/-/esrecurse-4.3.0.tgz", "integrity": "sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==", "dev": true, - "license": "BSD-2-Clause", "dependencies": { "estraverse": "^5.2.0" }, @@ -5107,7 +5104,6 @@ "resolved": "https://registry.npmjs.org/file-entry-cache/-/file-entry-cache-8.0.0.tgz", "integrity": "sha512-XXTUwCvisa5oacNGRP9SfNtYBNAMi+RPwBFmblZEF7N7swHYQS6/Zfk7SRwx4D5j3CH211YNRco1DEMNVfZCnQ==", "dev": true, - "license": "MIT", "dependencies": { "flat-cache": "^4.0.0" }, @@ -5183,7 +5179,6 @@ "resolved": "https://registry.npmjs.org/flat-cache/-/flat-cache-4.0.1.tgz", "integrity": "sha512-f7ccFPK3SXFHpx15UIGyRJ/FJQctuKZ0zVuN3frBo4HnK3cay9VEW0R6yPYFHC0AgqhukPzKjq22t5DmAyqGyw==", "dev": true, - "license": "MIT", "dependencies": { "flatted": "^3.2.9", "keyv": "^4.5.4" @@ -5196,8 +5191,7 @@ "version": "3.3.1", "resolved": "https://registry.npmjs.org/flatted/-/flatted-3.3.1.tgz", "integrity": "sha512-X8cqMLLie7KsNUDSdzeN8FYK9rEt4Dt67OsG/DNGnYTSDBG4uFAJFBnUeiV+zCVAvwFy56IjM9sH51jVaEhNxw==", - "dev": true, - "license": "ISC" + "dev": true }, "node_modules/form-data": { "version": "4.0.0", @@ -6895,8 +6889,7 @@ "version": "3.0.1", "resolved": "https://registry.npmjs.org/json-buffer/-/json-buffer-3.0.1.tgz", "integrity": "sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ==", - "dev": true, - "license": "MIT" + "dev": true }, "node_modules/json-parse-even-better-errors": { "version": "2.3.1", @@ -6945,7 +6938,6 @@ "resolved": "https://registry.npmjs.org/keyv/-/keyv-4.5.4.tgz", "integrity": "sha512-oxVHkHR/EJf2CNXnWxRLW6mg7JyCCUcG0DtEGmL2ctUo1PNTin1PUil+r/+4r5MpVgC/fn1kjsx7mjSujKqIpw==", "dev": true, - "license": "MIT", "dependencies": 
{ "json-buffer": "3.0.1" } @@ -8310,7 +8302,6 @@ "integrity": "sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==", "deprecated": "Rimraf versions prior to v4 are no longer supported", "dev": true, - "license": "ISC", "peer": true, "dependencies": { "glob": "^7.1.3" From 98cf5942ae054cb5223b69087866e87bef5ee10a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 2 Oct 2024 05:07:46 +0000 Subject: [PATCH 104/137] Bump postcss from 8.4.44 to 8.4.47 in /web/ui Bumps [postcss](https://github.com/postcss/postcss) from 8.4.44 to 8.4.47. - [Release notes](https://github.com/postcss/postcss/releases) - [Changelog](https://github.com/postcss/postcss/blob/main/CHANGELOG.md) - [Commits](https://github.com/postcss/postcss/compare/8.4.44...8.4.47) --- updated-dependencies: - dependency-name: postcss dependency-type: direct:development update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- web/ui/mantine-ui/package.json | 2 +- web/ui/package-lock.json | 27 ++++++++++++--------------- 2 files changed, 13 insertions(+), 16 deletions(-) diff --git a/web/ui/mantine-ui/package.json b/web/ui/mantine-ui/package.json index 2487d27a2e..e1079805fe 100644 --- a/web/ui/mantine-ui/package.json +++ b/web/ui/mantine-ui/package.json @@ -64,7 +64,7 @@ "eslint-plugin-react-refresh": "^0.4.12", "globals": "^15.9.0", "jsdom": "^25.0.1", - "postcss": "^8.4.35", + "postcss": "^8.4.47", "postcss-preset-mantine": "^1.17.0", "postcss-simple-vars": "^7.0.1", "vite": "^5.4.8", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index 8ca1c2b600..7137da077d 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -78,7 +78,7 @@ "eslint-plugin-react-refresh": "^0.4.12", "globals": "^15.9.0", "jsdom": "^25.0.1", - "postcss": "^8.4.35", + "postcss": "^8.4.47", "postcss-preset-mantine": "^1.17.0", "postcss-simple-vars": "^7.0.1", "vite": "^5.4.8", @@ 
-7555,10 +7555,9 @@ } }, "node_modules/picocolors": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.0.1.tgz", - "integrity": "sha512-anP1Z8qwhkbmu7MFP5iTt+wQKXgwzf7zTyGlcdzabySa9vd0Xt392U0rVmz9poOaBj0uHJKyyo9/upk0HrEQew==", - "license": "ISC" + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.0.tgz", + "integrity": "sha512-TQ92mBOW0l3LeMeyLV6mzy/kWr8lkd/hp3mTg7wYK7zJhuBStmGMBG0BdeDZS/dZx1IukaX6Bk11zcln25o1Aw==" }, "node_modules/picomatch": { "version": "2.3.1", @@ -7659,9 +7658,9 @@ } }, "node_modules/postcss": { - "version": "8.4.44", - "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.44.tgz", - "integrity": "sha512-Aweb9unOEpQ3ezu4Q00DPvvM2ZTUitJdNKeP/+uQgr1IBIqu574IaZoURId7BKtWMREwzKa9OgzPzezWGPWFQw==", + "version": "8.4.47", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.47.tgz", + "integrity": "sha512-56rxCq7G/XfB4EkXq9Egn5GCqugWvDFjafDOThIdMBsI15iqPqR5r15TfSr1YPYeEI19YeaXMCbY6u88Y76GLQ==", "funding": [ { "type": "opencollective", @@ -7676,11 +7675,10 @@ "url": "https://github.com/sponsors/ai" } ], - "license": "MIT", "dependencies": { "nanoid": "^3.3.7", - "picocolors": "^1.0.1", - "source-map-js": "^1.2.0" + "picocolors": "^1.1.0", + "source-map-js": "^1.2.1" }, "engines": { "node": "^10 || ^12 || >=14" @@ -8519,10 +8517,9 @@ } }, "node_modules/source-map-js": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.0.tgz", - "integrity": "sha512-itJW8lvSA0TXEphiRoawsCksnlf8SyvmFzIhltqAHluXd88pkCd+cXJVHTDwdCr0IzwptSm035IHQktUu1QUMg==", - "license": "BSD-3-Clause", + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.1.tgz", + "integrity": "sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==", "engines": { "node": ">=0.10.0" } From c0a0520b5455deca7543342a843d6b29971056c8 Mon Sep 17 00:00:00 2001 From: 
"dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 2 Oct 2024 05:55:37 +0000 Subject: [PATCH 105/137] Bump globals from 15.9.0 to 15.10.0 in /web/ui Bumps [globals](https://github.com/sindresorhus/globals) from 15.9.0 to 15.10.0. - [Release notes](https://github.com/sindresorhus/globals/releases) - [Commits](https://github.com/sindresorhus/globals/compare/v15.9.0...v15.10.0) --- updated-dependencies: - dependency-name: globals dependency-type: direct:development update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- web/ui/mantine-ui/package.json | 2 +- web/ui/package-lock.json | 34 ++++++++++++++++------------------ 2 files changed, 17 insertions(+), 19 deletions(-) diff --git a/web/ui/mantine-ui/package.json b/web/ui/mantine-ui/package.json index 3299336257..ee9f2f1e60 100644 --- a/web/ui/mantine-ui/package.json +++ b/web/ui/mantine-ui/package.json @@ -62,7 +62,7 @@ "eslint": "^9.11.1", "eslint-plugin-react-hooks": "^5.1.0-rc-e56f4ae3-20240830", "eslint-plugin-react-refresh": "^0.4.12", - "globals": "^15.9.0", + "globals": "^15.10.0", "jsdom": "^25.0.1", "postcss": "^8.4.35", "postcss-preset-mantine": "^1.17.0", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index f33f54562e..5616a83819 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -76,7 +76,7 @@ "eslint": "^9.11.1", "eslint-plugin-react-hooks": "^5.1.0-rc-e56f4ae3-20240830", "eslint-plugin-react-refresh": "^0.4.12", - "globals": "^15.9.0", + "globals": "^15.10.0", "jsdom": "^25.0.1", "postcss": "^8.4.35", "postcss-preset-mantine": "^1.17.0", @@ -153,19 +153,6 @@ } } }, - "mantine-ui/node_modules/globals": { - "version": "15.9.0", - "resolved": "https://registry.npmjs.org/globals/-/globals-15.9.0.tgz", - "integrity": "sha512-SmSKyLLKFbSr6rptvP8izbyxJL4ILwqO9Jg23UA0sDlGlu58V59D1//I3vlc0KJphVdUR7vMjHIplYnzBxorQA==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=18" - }, - "funding": { - "url": 
"https://github.com/sponsors/sindresorhus" - } - }, "module/codemirror-promql": { "name": "@prometheus-io/codemirror-promql", "version": "0.300.0-beta.0", @@ -1500,6 +1487,18 @@ "url": "https://opencollective.com/eslint" } }, + "node_modules/@eslint/eslintrc/node_modules/globals": { + "version": "14.0.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-14.0.0.tgz", + "integrity": "sha512-oahGvuMGQlPw/ivIYBjVSrWAfWLBeku5tpPE2fOPLi+WHffIWbuh2tCjhyQhTBPMf5E9jDEH4FOmTYgYwbKwtQ==", + "dev": true, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, "node_modules/@eslint/js": { "version": "9.11.1", "resolved": "https://registry.npmjs.org/@eslint/js/-/js-9.11.1.tgz", @@ -5341,11 +5340,10 @@ } }, "node_modules/globals": { - "version": "14.0.0", - "resolved": "https://registry.npmjs.org/globals/-/globals-14.0.0.tgz", - "integrity": "sha512-oahGvuMGQlPw/ivIYBjVSrWAfWLBeku5tpPE2fOPLi+WHffIWbuh2tCjhyQhTBPMf5E9jDEH4FOmTYgYwbKwtQ==", + "version": "15.10.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-15.10.0.tgz", + "integrity": "sha512-tqFIbz83w4Y5TCbtgjZjApohbuh7K9BxGYFm7ifwDR240tvdb7P9x+/9VvUKlmkPoiknoJtanI8UOrqxS3a7lQ==", "dev": true, - "license": "MIT", "engines": { "node": ">=18" }, From 4f448d4c4ca9a47af1549fb38e24246e775693d8 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 2 Oct 2024 05:56:23 +0000 Subject: [PATCH 106/137] Bump @tabler/icons-react from 2.47.0 to 3.19.0 in /web/ui Bumps [@tabler/icons-react](https://github.com/tabler/tabler-icons/tree/HEAD/packages/icons-react) from 2.47.0 to 3.19.0. - [Release notes](https://github.com/tabler/tabler-icons/releases) - [Commits](https://github.com/tabler/tabler-icons/commits/v3.19.0/packages/icons-react) --- updated-dependencies: - dependency-name: "@tabler/icons-react" dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] --- web/ui/mantine-ui/package.json | 2 +- web/ui/package-lock.json | 21 +++++++++------------ 2 files changed, 10 insertions(+), 13 deletions(-) diff --git a/web/ui/mantine-ui/package.json b/web/ui/mantine-ui/package.json index 3299336257..514f78d8ca 100644 --- a/web/ui/mantine-ui/package.json +++ b/web/ui/mantine-ui/package.json @@ -30,7 +30,7 @@ "@nexucis/kvsearch": "^0.9.1", "@prometheus-io/codemirror-promql": "0.300.0-beta.0", "@reduxjs/toolkit": "^2.2.1", - "@tabler/icons-react": "^2.47.0", + "@tabler/icons-react": "^3.19.0", "@tanstack/react-query": "^5.59.0", "@testing-library/jest-dom": "^6.5.0", "@testing-library/react": "^16.0.1", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index f33f54562e..11fbb57f0f 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -44,7 +44,7 @@ "@nexucis/kvsearch": "^0.9.1", "@prometheus-io/codemirror-promql": "0.300.0-beta.0", "@reduxjs/toolkit": "^2.2.1", - "@tabler/icons-react": "^2.47.0", + "@tabler/icons-react": "^3.19.0", "@tanstack/react-query": "^5.59.0", "@testing-library/jest-dom": "^6.5.0", "@testing-library/react": "^16.0.1", @@ -2696,30 +2696,27 @@ } }, "node_modules/@tabler/icons": { - "version": "2.47.0", - "resolved": "https://registry.npmjs.org/@tabler/icons/-/icons-2.47.0.tgz", - "integrity": "sha512-4w5evLh+7FUUiA1GucvGj2ReX2TvOjEr4ejXdwL/bsjoSkof6r1gQmzqI+VHrE2CpJpB3al7bCTulOkFa/RcyA==", - "license": "MIT", + "version": "3.19.0", + "resolved": "https://registry.npmjs.org/@tabler/icons/-/icons-3.19.0.tgz", + "integrity": "sha512-A4WEWqpdbTfnpFEtwXqwAe9qf9sp1yRPvzppqAuwcoF0q5YInqB+JkJtSFToCyBpPVeLxJUxxkapLvt2qQgnag==", "funding": { "type": "github", "url": "https://github.com/sponsors/codecalm" } }, "node_modules/@tabler/icons-react": { - "version": "2.47.0", - "resolved": "https://registry.npmjs.org/@tabler/icons-react/-/icons-react-2.47.0.tgz", - "integrity": 
"sha512-iqly2FvCF/qUbgmvS8E40rVeYY7laltc5GUjRxQj59DuX0x/6CpKHTXt86YlI2whg4czvd/c8Ce8YR08uEku0g==", - "license": "MIT", + "version": "3.19.0", + "resolved": "https://registry.npmjs.org/@tabler/icons-react/-/icons-react-3.19.0.tgz", + "integrity": "sha512-AqEWGI0tQWgqo6ZjMO5yJ9sYT8oXLuAM/up0hN9iENS6IdtNZryKrkNSiMgpwweNTpl8wFFG/dAZ959S91A/uQ==", "dependencies": { - "@tabler/icons": "2.47.0", - "prop-types": "^15.7.2" + "@tabler/icons": "3.19.0" }, "funding": { "type": "github", "url": "https://github.com/sponsors/codecalm" }, "peerDependencies": { - "react": "^16.5.1 || ^17.0.0 || ^18.0.0" + "react": ">= 16" } }, "node_modules/@tanstack/query-core": { From d3b0ab453cc584fbec5bdf67586f782d8765f3db Mon Sep 17 00:00:00 2001 From: Julius Volz Date: Wed, 2 Oct 2024 08:05:20 +0200 Subject: [PATCH 107/137] Fix tabler icon props import after version bump Signed-off-by: Julius Volz --- web/ui/mantine-ui/src/components/InfoPageCard.tsx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/web/ui/mantine-ui/src/components/InfoPageCard.tsx b/web/ui/mantine-ui/src/components/InfoPageCard.tsx index 3d0817e6d6..f6797133c4 100644 --- a/web/ui/mantine-ui/src/components/InfoPageCard.tsx +++ b/web/ui/mantine-ui/src/components/InfoPageCard.tsx @@ -1,12 +1,12 @@ import { Card, Group } from "@mantine/core"; -import { TablerIconsProps } from "@tabler/icons-react"; +import { IconProps } from "@tabler/icons-react"; import { FC, ReactNode } from "react"; import { infoPageCardTitleIconStyle } from "../styles"; const InfoPageCard: FC<{ children: ReactNode; title?: string; - icon?: React.ComponentType; + icon?: React.ComponentType; }> = ({ children, title, icon: Icon }) => { return ( From 98cd80b2e224220d8a4511d21522bb70c9e834a8 Mon Sep 17 00:00:00 2001 From: Manik Rana Date: Wed, 2 Oct 2024 12:18:27 +0530 Subject: [PATCH 108/137] feat: add microbenchmarks for OM CT parsing (#14933) * test: benchmark OM CT parsing Signed-off-by: Manik Rana * refac: move OM ct benchmark to 
promparse_test Signed-off-by: Manik Rana * chore: stricter comparison Co-authored-by: Arthur Silva Sens Signed-off-by: Manik Rana * feat: use richer OM test data Signed-off-by: Manik Rana * refac: move parse-ct test outside of inner loop Signed-off-by: Manik Rana * refac: separate benchmarks for om and prom parsers Signed-off-by: Manik Rana * chore: remove unused code Signed-off-by: Manik Rana * chore: remove more unused code Signed-off-by: Manik Rana * refac: rename to BenchmarkOMParseCreatedTimestamp Co-authored-by: Bartlomiej Plotka Signed-off-by: Manik Rana --------- Signed-off-by: Manik Rana Signed-off-by: Manik Rana Co-authored-by: Arthur Silva Sens Co-authored-by: Bartlomiej Plotka --- model/textparse/omtestdata.txt | 64 ++++++++++++++++++++++++ model/textparse/openmetricsparse_test.go | 44 ++++++++++++++++ model/textparse/promparse_test.go | 2 +- 3 files changed, 109 insertions(+), 1 deletion(-) create mode 100644 model/textparse/omtestdata.txt diff --git a/model/textparse/omtestdata.txt b/model/textparse/omtestdata.txt new file mode 100644 index 0000000000..0f5f78b8b9 --- /dev/null +++ b/model/textparse/omtestdata.txt @@ -0,0 +1,64 @@ +# HELP go_build_info Build information about the main Go module. +# TYPE go_build_info gauge +go_build_info{checksum="",path="",version=""} 1.0 +# HELP promhttp_metric_handler_errors Total number of internal errors encountered by the promhttp metric handler. +# TYPE promhttp_metric_handler_errors counter +promhttp_metric_handler_errors_total{cause="encoding"} 0.0 +promhttp_metric_handler_errors_created{cause="encoding"} 1.726839813016397e+09 +promhttp_metric_handler_errors_total{cause="gathering"} 0.0 +promhttp_metric_handler_errors_created{cause="gathering"} 1.726839813016395e+09 +# HELP rpc_durations_histogram_seconds RPC latency distributions. 
+# TYPE rpc_durations_histogram_seconds histogram +rpc_durations_histogram_seconds_bucket{le="-0.00099"} 0 +rpc_durations_histogram_seconds_bucket{le="-0.00089"} 0 +rpc_durations_histogram_seconds_bucket{le="-0.0007899999999999999"} 0 +rpc_durations_histogram_seconds_bucket{le="-0.0006899999999999999"} 0 +rpc_durations_histogram_seconds_bucket{le="-0.0005899999999999998"} 0 +rpc_durations_histogram_seconds_bucket{le="-0.0004899999999999998"} 0 +rpc_durations_histogram_seconds_bucket{le="-0.0003899999999999998"} 0 +rpc_durations_histogram_seconds_bucket{le="-0.0002899999999999998"} 3 # {dummyID="17783"} -0.0003825067330956884 1.7268398142239082e+09 +rpc_durations_histogram_seconds_bucket{le="-0.0001899999999999998"} 5 # {dummyID="84741"} -0.00020178290006788965 1.726839814829977e+09 +rpc_durations_histogram_seconds_bucket{le="-8.999999999999979e-05"} 5 +rpc_durations_histogram_seconds_bucket{le="1.0000000000000216e-05"} 8 # {dummyID="19206"} -4.6156147425468016e-05 1.7268398151337721e+09 +rpc_durations_histogram_seconds_bucket{le="0.00011000000000000022"} 9 # {dummyID="3974"} 9.528436760156754e-05 1.726839814526797e+09 +rpc_durations_histogram_seconds_bucket{le="0.00021000000000000023"} 11 # {dummyID="29640"} 0.00017459624183458996 1.7268398139220061e+09 +rpc_durations_histogram_seconds_bucket{le="0.0003100000000000002"} 15 # {dummyID="9818"} 0.0002791130914009552 1.7268398149821382e+09 +rpc_durations_histogram_seconds_bucket{le="0.0004100000000000002"} 15 +rpc_durations_histogram_seconds_bucket{le="0.0005100000000000003"} 15 +rpc_durations_histogram_seconds_bucket{le="0.0006100000000000003"} 15 +rpc_durations_histogram_seconds_bucket{le="0.0007100000000000003"} 15 +rpc_durations_histogram_seconds_bucket{le="0.0008100000000000004"} 15 +rpc_durations_histogram_seconds_bucket{le="0.0009100000000000004"} 15 +rpc_durations_histogram_seconds_bucket{le="+Inf"} 15 +rpc_durations_histogram_seconds_sum -8.452185437166741e-05 +rpc_durations_histogram_seconds_count 15 
+rpc_durations_histogram_seconds_created 1.726839813016302e+09 +# HELP rpc_durations_seconds RPC latency distributions. +# TYPE rpc_durations_seconds summary +rpc_durations_seconds{service="exponential",quantile="0.5"} 7.689368882420941e-07 +rpc_durations_seconds{service="exponential",quantile="0.9"} 1.6537614174305048e-06 +rpc_durations_seconds{service="exponential",quantile="0.99"} 2.0965499063061924e-06 +rpc_durations_seconds_sum{service="exponential"} 2.0318666372575776e-05 +rpc_durations_seconds_count{service="exponential"} 22 +rpc_durations_seconds_created{service="exponential"} 1.7268398130168908e+09 +rpc_durations_seconds{service="normal",quantile="0.5"} -5.066758674917046e-06 +rpc_durations_seconds{service="normal",quantile="0.9"} 0.0002935723711788224 +rpc_durations_seconds{service="normal",quantile="0.99"} 0.0003023094636293776 +rpc_durations_seconds_sum{service="normal"} -8.452185437166741e-05 +rpc_durations_seconds_count{service="normal"} 15 +rpc_durations_seconds_created{service="normal"} 1.726839813016714e+09 +rpc_durations_seconds{service="uniform",quantile="0.5"} 9.005014931474918e-05 +rpc_durations_seconds{service="uniform",quantile="0.9"} 0.00017801230208182325 +rpc_durations_seconds{service="uniform",quantile="0.99"} 0.00018641524538180192 +rpc_durations_seconds_sum{service="uniform"} 0.0011666095700533677 +rpc_durations_seconds_count{service="uniform"} 11 +rpc_durations_seconds_created{service="uniform"} 1.72683981301684e+09 +# HELP rpc_requests Total number of RPC requests received. 
+# TYPE rpc_requests counter +rpc_requests_total{service="exponential"} 22.0 +rpc_requests_created{service="exponential"} 1.726839813016893e+09 +rpc_requests_total{service="normal"} 15.0 +rpc_requests_created{service="normal"} 1.726839813016717e+09 +rpc_requests_total{service="uniform"} 11.0 +rpc_requests_created{service="uniform"} 1.7268398130168471e+09 +# EOF diff --git a/model/textparse/openmetricsparse_test.go b/model/textparse/openmetricsparse_test.go index cadaabc99f..ce1261f5c2 100644 --- a/model/textparse/openmetricsparse_test.go +++ b/model/textparse/openmetricsparse_test.go @@ -16,6 +16,7 @@ package textparse import ( "errors" "io" + "os" "testing" "github.com/prometheus/common/model" @@ -992,3 +993,46 @@ go_gc_duration_seconds_created`) require.Equal(t, "go_gc_duration_seconds", string(copyParser.l.b[copyParser.offsets[0]:copyParser.offsets[1]])) require.False(t, copyParser.skipCTSeries) } + +func BenchmarkOMParseCreatedTimestamp(b *testing.B) { + for parserName, parser := range map[string]func([]byte, *labels.SymbolTable) Parser{ + "openmetrics": func(b []byte, st *labels.SymbolTable) Parser { + return NewOpenMetricsParser(b, st) + }, + "openmetrics-skip-ct": func(b []byte, st *labels.SymbolTable) Parser { + return NewOpenMetricsParser(b, st, WithOMParserCTSeriesSkipped()) + }, + } { + f, err := os.Open("omtestdata.txt") + require.NoError(b, err) + defer f.Close() + + buf, err := io.ReadAll(f) + require.NoError(b, err) + + b.Run(parserName+"/parse-ct/"+"omtestdata.txt", func(b *testing.B) { + b.SetBytes(int64(len(buf) / promtestdataSampleCount)) + b.ReportAllocs() + b.ResetTimer() + + st := labels.NewSymbolTable() + for i := 0; i < b.N; i += promtestdataSampleCount { + p := parser(buf, st) + + Outer: + for i < b.N { + t, err := p.Next() + switch t { + case EntryInvalid: + if errors.Is(err, io.EOF) { + break Outer + } + b.Fatal(err) + case EntrySeries: + p.CreatedTimestamp() + } + } + } + }) + } +} diff --git a/model/textparse/promparse_test.go 
b/model/textparse/promparse_test.go index ce9daf53e0..4520dfe9a9 100644 --- a/model/textparse/promparse_test.go +++ b/model/textparse/promparse_test.go @@ -492,7 +492,7 @@ const ( promtestdataSampleCount = 410 ) -func BenchmarkParse(b *testing.B) { +func BenchmarkPromParse(b *testing.B) { for parserName, parser := range map[string]func([]byte, *labels.SymbolTable) Parser{ "prometheus": NewPromParser, "openmetrics": func(b []byte, st *labels.SymbolTable) Parser { From e99e7ca9cfa4f0eb2946f225d82aa9fdec835752 Mon Sep 17 00:00:00 2001 From: Jesus Vazquez Date: Wed, 2 Oct 2024 12:40:31 +0200 Subject: [PATCH 109/137] README: Update readme with API flag change for the otlp receiver (#15073) Signed-off-by: Jesus Vazquez --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7fbdadfa62..8874d254f5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,6 +3,7 @@ ## unreleased * [CHANGE] `holt_winters` is now called `double_exponential_smoothing` and moves behind the [experimental-promql-functions feature flag](https://prometheus.io/docs/prometheus/latest/feature_flags/#experimental-promql-functions). #14930 +* [CHANGE] API: The OTLP receiver endpoint can now be enabled using `--web.enable-otlp-receiver` instead of `--enable-feature=otlp-write-receiver`. #14894 * [BUGFIX] PromQL: Only return "possible non-counter" annotation when `rate` returns points. 
#14910 ## 3.0.0-beta.0 / 2024-09-05 From f1c57a95ed7df70176934a38799354f692ac8963 Mon Sep 17 00:00:00 2001 From: Manik Rana Date: Wed, 2 Oct 2024 16:22:03 +0530 Subject: [PATCH 110/137] change: No longer ingest OM _created as timeseries if feature-flag 'enable-ct-zero-ingestion' is enabled; fixed OM text CT conversion bug (#14738) * chore: revert TypeRequiresCT to private Signed-off-by: Manik Rana * feat: init NewOpenMetricsParser with skipCT true Signed-off-by: Manik Rana * refac: allow opt-in to OM CT ingestion Signed-off-by: Manik Rana * chore: lint Signed-off-by: Manik Rana * chore: use textparse interface to set om options Signed-off-by: Manik Rana * fix: set skipOMSeries in test Signed-off-by: Manik Rana * chore: gofumpt Signed-off-by: Manik Rana * wip: add tests for OM CR parse Signed-off-by: Manik Rana * chore: merge ct tests Signed-off-by: Manik Rana * tests: add cases for OM text Signed-off-by: Manik Rana * fix: check correct test cases Signed-off-by: Manik Rana * chore: use both scrape protocols in config Signed-off-by: Manik Rana * fix: fix inputs and output tests for OM Signed-off-by: Manik Rana * chore: cleanup Signed-off-by: Manik Rana * refac: rename skipOMSeries to skipOMCTSeries Co-authored-by: Arthur Silva Sens Signed-off-by: Manik Rana * fix: finish refac Signed-off-by: Manik Rana * refac: move setup code outside test Signed-off-by: Manik Rana * tests: verify _created lines create new metric in certain cases Signed-off-by: Manik Rana * fix: post merge fixes Signed-off-by: Manik Rana * chore: lint Signed-off-by: Manik Rana * manager: Fixed CT OMText conversion bug; Refactored tests. 
Signed-off-by: bwplotka * chore: lint Signed-off-by: Manik Rana * chore: gofumpt Signed-off-by: Manik Rana * chore: imports Signed-off-by: Manik Rana --------- Signed-off-by: Manik Rana Signed-off-by: Manik Rana Signed-off-by: bwplotka Co-authored-by: Arthur Silva Sens Co-authored-by: bwplotka --- model/textparse/interface.go | 6 +- model/textparse/interface_test.go | 2 +- model/textparse/openmetricsparse.go | 5 +- model/textparse/openmetricsparse_test.go | 70 +++--- promql/fuzz.go | 2 +- scrape/manager_test.go | 304 ++++++++++++++--------- scrape/scrape.go | 2 +- scrape/scrape_test.go | 2 +- 8 files changed, 227 insertions(+), 166 deletions(-) diff --git a/model/textparse/interface.go b/model/textparse/interface.go index 0b5d9281e4..7de88a4869 100644 --- a/model/textparse/interface.go +++ b/model/textparse/interface.go @@ -80,7 +80,7 @@ type Parser interface { // // This function always returns a valid parser, but might additionally // return an error if the content type cannot be parsed. 
-func New(b []byte, contentType string, parseClassicHistograms bool, st *labels.SymbolTable) (Parser, error) { +func New(b []byte, contentType string, parseClassicHistograms, skipOMCTSeries bool, st *labels.SymbolTable) (Parser, error) { if contentType == "" { return NewPromParser(b, st), nil } @@ -91,7 +91,9 @@ func New(b []byte, contentType string, parseClassicHistograms bool, st *labels.S } switch mediaType { case "application/openmetrics-text": - return NewOpenMetricsParser(b, st), nil + return NewOpenMetricsParser(b, st, func(o *openMetricsParserOptions) { + o.SkipCTSeries = skipOMCTSeries + }), nil case "application/vnd.google.protobuf": return NewProtobufParser(b, parseClassicHistograms, st), nil default: diff --git a/model/textparse/interface_test.go b/model/textparse/interface_test.go index c644565628..970b96706e 100644 --- a/model/textparse/interface_test.go +++ b/model/textparse/interface_test.go @@ -93,7 +93,7 @@ func TestNewParser(t *testing.T) { tt := tt // Copy to local variable before going parallel. t.Parallel() - p, err := New([]byte{}, tt.contentType, false, labels.NewSymbolTable()) + p, err := New([]byte{}, tt.contentType, false, false, labels.NewSymbolTable()) tt.validateParser(t, p) if tt.err == "" { require.NoError(t, err) diff --git a/model/textparse/openmetricsparse.go b/model/textparse/openmetricsparse.go index ea7607c3a7..8ec1b62ffb 100644 --- a/model/textparse/openmetricsparse.go +++ b/model/textparse/openmetricsparse.go @@ -297,7 +297,10 @@ func (p *OpenMetricsParser) CreatedTimestamp() *int64 { // CT line for a different series, for our series no CT. return nil } - ct := int64(peek.val) + + // All timestamps in OpenMetrics are Unix Epoch in seconds. Convert to milliseconds. 
+ // https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#timestamps + ct := int64(peek.val * 1000.0) return &ct } } diff --git a/model/textparse/openmetricsparse_test.go b/model/textparse/openmetricsparse_test.go index ce1261f5c2..93033380b0 100644 --- a/model/textparse/openmetricsparse_test.go +++ b/model/textparse/openmetricsparse_test.go @@ -70,23 +70,23 @@ testmetric{label="\"bar\""} 1 # HELP foo Counter with and without labels to certify CT is parsed for both cases # TYPE foo counter foo_total 17.0 1520879607.789 # {id="counter-test"} 5 -foo_created 1000 +foo_created 1520872607.123 foo_total{a="b"} 17.0 1520879607.789 # {id="counter-test"} 5 -foo_created{a="b"} 1000 +foo_created{a="b"} 1520872607.123 # HELP bar Summary with CT at the end, making sure we find CT even if it's multiple lines a far # TYPE bar summary bar_count 17.0 bar_sum 324789.3 bar{quantile="0.95"} 123.7 bar{quantile="0.99"} 150.0 -bar_created 1520430000 +bar_created 1520872607.123 # HELP baz Histogram with the same objective as above's summary # TYPE baz histogram baz_bucket{le="0.0"} 0 baz_bucket{le="+Inf"} 17 baz_count 17 baz_sum 324789.3 -baz_created 1520430000 +baz_created 1520872607.123 # HELP fizz_created Gauge which shouldn't be parsed as CT # TYPE fizz_created gauge fizz_created 17.0` @@ -251,14 +251,14 @@ fizz_created 17.0` lset: labels.FromStrings("__name__", "foo_total"), t: int64p(1520879607789), e: &exemplar.Exemplar{Labels: labels.FromStrings("id", "counter-test"), Value: 5}, - ct: int64p(1000), + ct: int64p(1520872607123), }, { m: `foo_total{a="b"}`, v: 17.0, lset: labels.FromStrings("__name__", "foo_total", "a", "b"), t: int64p(1520879607789), e: &exemplar.Exemplar{Labels: labels.FromStrings("id", "counter-test"), Value: 5}, - ct: int64p(1000), + ct: int64p(1520872607123), }, { m: "bar", help: "Summary with CT at the end, making sure we find CT even if it's multiple lines a far", @@ -269,22 +269,22 @@ fizz_created 17.0` m: "bar_count", v: 
17.0, lset: labels.FromStrings("__name__", "bar_count"), - ct: int64p(1520430000), + ct: int64p(1520872607123), }, { m: "bar_sum", v: 324789.3, lset: labels.FromStrings("__name__", "bar_sum"), - ct: int64p(1520430000), + ct: int64p(1520872607123), }, { m: `bar{quantile="0.95"}`, v: 123.7, lset: labels.FromStrings("__name__", "bar", "quantile", "0.95"), - ct: int64p(1520430000), + ct: int64p(1520872607123), }, { m: `bar{quantile="0.99"}`, v: 150.0, lset: labels.FromStrings("__name__", "bar", "quantile", "0.99"), - ct: int64p(1520430000), + ct: int64p(1520872607123), }, { m: "baz", help: "Histogram with the same objective as above's summary", @@ -295,22 +295,22 @@ fizz_created 17.0` m: `baz_bucket{le="0.0"}`, v: 0, lset: labels.FromStrings("__name__", "baz_bucket", "le", "0.0"), - ct: int64p(1520430000), + ct: int64p(1520872607123), }, { m: `baz_bucket{le="+Inf"}`, v: 17, lset: labels.FromStrings("__name__", "baz_bucket", "le", "+Inf"), - ct: int64p(1520430000), + ct: int64p(1520872607123), }, { m: `baz_count`, v: 17, lset: labels.FromStrings("__name__", "baz_count"), - ct: int64p(1520430000), + ct: int64p(1520872607123), }, { m: `baz_sum`, v: 324789.3, lset: labels.FromStrings("__name__", "baz_sum"), - ct: int64p(1520430000), + ct: int64p(1520872607123), }, { m: "fizz_created", help: "Gauge which shouldn't be parsed as CT", @@ -347,7 +347,7 @@ func TestUTF8OpenMetricsParse(t *testing.T) { # UNIT "go.gc_duration_seconds" seconds {"go.gc_duration_seconds",quantile="0"} 4.9351e-05 {"go.gc_duration_seconds",quantile="0.25"} 7.424100000000001e-05 -{"go.gc_duration_seconds_created"} 12313 +{"go.gc_duration_seconds_created"} 1520872607.123 {"go.gc_duration_seconds",quantile="0.5",a="b"} 8.3835e-05 {"http.status",q="0.9",a="b"} 8.3835e-05 {"http.status",q="0.9",a="b"} 8.3835e-05 @@ -371,12 +371,12 @@ func TestUTF8OpenMetricsParse(t *testing.T) { m: `{"go.gc_duration_seconds",quantile="0"}`, v: 4.9351e-05, lset: labels.FromStrings("__name__", "go.gc_duration_seconds", 
"quantile", "0"), - ct: int64p(12313), + ct: int64p(1520872607123), }, { m: `{"go.gc_duration_seconds",quantile="0.25"}`, v: 7.424100000000001e-05, lset: labels.FromStrings("__name__", "go.gc_duration_seconds", "quantile", "0.25"), - ct: int64p(12313), + ct: int64p(1520872607123), }, { m: `{"go.gc_duration_seconds",quantile="0.5",a="b"}`, v: 8.3835e-05, @@ -700,7 +700,7 @@ func TestOpenMetricsParseErrors(t *testing.T) { } for i, c := range cases { - p := NewOpenMetricsParser([]byte(c.input), labels.NewSymbolTable()) + p := NewOpenMetricsParser([]byte(c.input), labels.NewSymbolTable(), WithOMParserCTSeriesSkipped()) var err error for err == nil { _, err = p.Next() @@ -765,7 +765,7 @@ func TestOMNullByteHandling(t *testing.T) { } for i, c := range cases { - p := NewOpenMetricsParser([]byte(c.input), labels.NewSymbolTable()) + p := NewOpenMetricsParser([]byte(c.input), labels.NewSymbolTable(), WithOMParserCTSeriesSkipped()) var err error for err == nil { _, err = p.Next() @@ -788,12 +788,12 @@ func TestCTParseFailures(t *testing.T) { # TYPE something histogram something_count 17 something_sum 324789.3 -something_created 1520430001 +something_created 1520872607.123 something_bucket{le="0.0"} 0 something_bucket{le="+Inf"} 17 # HELP thing Histogram with _created as first line # TYPE thing histogram -thing_created 1520430002 +thing_created 1520872607.123 thing_count 17 thing_sum 324789.3 thing_bucket{le="0.0"} 0 @@ -802,12 +802,12 @@ thing_bucket{le="+Inf"} 17 # TYPE yum summary yum_count 17.0 yum_sum 324789.3 -yum_created 1520430003 +yum_created 1520872607.123 yum{quantile="0.95"} 123.7 yum{quantile="0.99"} 150.0 # HELP foobar Summary with _created as the first line # TYPE foobar summary -foobar_created 1520430004 +foobar_created 1520872607.123 foobar_count 17.0 foobar_sum 324789.3 foobar{quantile="0.95"} 123.7 @@ -836,19 +836,19 @@ foobar{quantile="0.99"} 150.0` isErr: false, }, { m: `something_count`, - ct: int64p(1520430001), + ct: int64p(1520872607123), isErr: false, 
}, { m: `something_sum`, - ct: int64p(1520430001), + ct: int64p(1520872607123), isErr: false, }, { m: `something_bucket{le="0.0"}`, - ct: int64p(1520430001), + ct: int64p(1520872607123), isErr: true, }, { m: `something_bucket{le="+Inf"}`, - ct: int64p(1520430001), + ct: int64p(1520872607123), isErr: true, }, { m: "thing", @@ -860,19 +860,19 @@ foobar{quantile="0.99"} 150.0` isErr: false, }, { m: `thing_count`, - ct: int64p(1520430002), + ct: int64p(1520872607123), isErr: true, }, { m: `thing_sum`, - ct: int64p(1520430002), + ct: int64p(1520872607123), isErr: true, }, { m: `thing_bucket{le="0.0"}`, - ct: int64p(1520430002), + ct: int64p(1520872607123), isErr: true, }, { m: `thing_bucket{le="+Inf"}`, - ct: int64p(1520430002), + ct: int64p(1520872607123), isErr: true, }, { m: "yum", @@ -884,19 +884,19 @@ foobar{quantile="0.99"} 150.0` isErr: false, }, { m: "yum_count", - ct: int64p(1520430003), + ct: int64p(1520872607123), isErr: false, }, { m: "yum_sum", - ct: int64p(1520430003), + ct: int64p(1520872607123), isErr: false, }, { m: `yum{quantile="0.95"}`, - ct: int64p(1520430003), + ct: int64p(1520872607123), isErr: true, }, { m: `yum{quantile="0.99"}`, - ct: int64p(1520430003), + ct: int64p(1520872607123), isErr: true, }, { m: "foobar", diff --git a/promql/fuzz.go b/promql/fuzz.go index 3fd50b9496..57fd1166ac 100644 --- a/promql/fuzz.go +++ b/promql/fuzz.go @@ -61,7 +61,7 @@ const ( var symbolTable = labels.NewSymbolTable() func fuzzParseMetricWithContentType(in []byte, contentType string) int { - p, warning := textparse.New(in, contentType, false, symbolTable) + p, warning := textparse.New(in, contentType, false, false, symbolTable) if warning != nil { // An invalid content type is being passed, which should not happen // in this context. 
diff --git a/scrape/manager_test.go b/scrape/manager_test.go index 7e01238cc6..8d2c3c9681 100644 --- a/scrape/manager_test.go +++ b/scrape/manager_test.go @@ -14,6 +14,7 @@ package scrape import ( + "bytes" "context" "fmt" "net/http" @@ -30,11 +31,14 @@ import ( "github.com/gogo/protobuf/proto" "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" + "github.com/prometheus/common/expfmt" "github.com/prometheus/common/model" "github.com/stretchr/testify/require" "google.golang.org/protobuf/types/known/timestamppb" "gopkg.in/yaml.v2" + "github.com/prometheus/prometheus/model/timestamp" + "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/discovery" _ "github.com/prometheus/prometheus/discovery/file" @@ -719,143 +723,195 @@ scrape_configs: require.ElementsMatch(t, []string{"job1", "job3"}, scrapeManager.ScrapePools()) } -// TestManagerCTZeroIngestion tests scrape manager for CT cases. -func TestManagerCTZeroIngestion(t *testing.T) { - const mName = "expected_counter" - - for _, tc := range []struct { - name string - counterSample *dto.Counter - enableCTZeroIngestion bool - }{ - { - name: "disabled with CT on counter", - counterSample: &dto.Counter{ - Value: proto.Float64(1.0), - // Timestamp does not matter as long as it exists in this test. - CreatedTimestamp: timestamppb.Now(), - }, +func setupScrapeManager(t *testing.T, honorTimestamps, enableCTZeroIngestion bool) (*collectResultAppender, *Manager) { + app := &collectResultAppender{} + scrapeManager, err := NewManager( + &Options{ + EnableCreatedTimestampZeroIngestion: enableCTZeroIngestion, + skipOffsetting: true, }, - { - name: "enabled with CT on counter", - counterSample: &dto.Counter{ - Value: proto.Float64(1.0), - // Timestamp does not matter as long as it exists in this test. 
- CreatedTimestamp: timestamppb.Now(), - }, - enableCTZeroIngestion: true, + log.NewLogfmtLogger(os.Stderr), + nil, + &collectResultAppendable{app}, + prometheus.NewRegistry(), + ) + require.NoError(t, err) + + require.NoError(t, scrapeManager.ApplyConfig(&config.Config{ + GlobalConfig: config.GlobalConfig{ + // Disable regular scrapes. + ScrapeInterval: model.Duration(9999 * time.Minute), + ScrapeTimeout: model.Duration(5 * time.Second), + ScrapeProtocols: []config.ScrapeProtocol{config.OpenMetricsText1_0_0, config.PrometheusProto}, }, - { - name: "enabled without CT on counter", - counterSample: &dto.Counter{ - Value: proto.Float64(1.0), - }, - enableCTZeroIngestion: true, - }, - } { - t.Run(tc.name, func(t *testing.T) { - app := &collectResultAppender{} - scrapeManager, err := NewManager( - &Options{ - EnableCreatedTimestampZeroIngestion: tc.enableCTZeroIngestion, - skipOffsetting: true, - }, - log.NewLogfmtLogger(os.Stderr), - nil, - &collectResultAppendable{app}, - prometheus.NewRegistry(), - ) - require.NoError(t, err) + ScrapeConfigs: []*config.ScrapeConfig{{JobName: "test", HonorTimestamps: honorTimestamps}}, + })) - require.NoError(t, scrapeManager.ApplyConfig(&config.Config{ - GlobalConfig: config.GlobalConfig{ - // Disable regular scrapes. - ScrapeInterval: model.Duration(9999 * time.Minute), - ScrapeTimeout: model.Duration(5 * time.Second), - // Ensure the proto is chosen. We need proto as it's the only protocol - // with the CT parsing support. - ScrapeProtocols: []config.ScrapeProtocol{config.PrometheusProto}, - }, - ScrapeConfigs: []*config.ScrapeConfig{{JobName: "test"}}, - })) + return app, scrapeManager +} - once := sync.Once{} - // Start fake HTTP target to that allow one scrape only. 
- server := httptest.NewServer( - http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - fail := true - once.Do(func() { - fail = false - w.Header().Set("Content-Type", `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited`) +func setupTestServer(t *testing.T, typ string, toWrite []byte) *httptest.Server { + once := sync.Once{} - ctrType := dto.MetricType_COUNTER - w.Write(protoMarshalDelimited(t, &dto.MetricFamily{ - Name: proto.String(mName), - Type: &ctrType, - Metric: []*dto.Metric{{Counter: tc.counterSample}}, - })) - }) - - if fail { - w.WriteHeader(http.StatusInternalServerError) - } - }), - ) - defer server.Close() - - serverURL, err := url.Parse(server.URL) - require.NoError(t, err) - - // Add fake target directly into tsets + reload. Normally users would use - // Manager.Run and wait for minimum 5s refresh interval. - scrapeManager.updateTsets(map[string][]*targetgroup.Group{ - "test": {{ - Targets: []model.LabelSet{{ - model.SchemeLabel: model.LabelValue(serverURL.Scheme), - model.AddressLabel: model.LabelValue(serverURL.Host), - }}, - }}, + server := httptest.NewServer( + http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fail := true + once.Do(func() { + fail = false + w.Header().Set("Content-Type", typ) + w.Write(toWrite) }) - scrapeManager.reload() - var got []float64 - // Wait for one scrape. - ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute) - defer cancel() - require.NoError(t, runutil.Retry(100*time.Millisecond, ctx.Done(), func() error { - app.mtx.Lock() - defer app.mtx.Unlock() - - // Check if scrape happened and grab the relevant samples, they have to be there - or it's a bug - // and it's not worth waiting. 
- for _, f := range app.resultFloats { - if f.metric.Get(model.MetricNameLabel) == mName { - got = append(got, f.f) - } - } - if len(app.resultFloats) > 0 { - return nil - } - return fmt.Errorf("expected some samples, got none") - }), "after 1 minute") - scrapeManager.Stop() - - // Check for zero samples, assuming we only injected always one sample. - // Did it contain CT to inject? If yes, was CT zero enabled? - if tc.counterSample.CreatedTimestamp.IsValid() && tc.enableCTZeroIngestion { - require.Len(t, got, 2) - require.Equal(t, 0.0, got[0]) - require.Equal(t, tc.counterSample.GetValue(), got[1]) - return + if fail { + w.WriteHeader(http.StatusInternalServerError) } + }), + ) - // Expect only one, valid sample. - require.Len(t, got, 1) - require.Equal(t, tc.counterSample.GetValue(), got[0]) + t.Cleanup(func() { server.Close() }) + + return server +} + +// TestManagerCTZeroIngestion tests scrape manager for various CT cases. +func TestManagerCTZeroIngestion(t *testing.T) { + const ( + // _total suffix is required, otherwise expfmt with OMText will mark metric as "unknown" + expectedMetricName = "expected_metric_total" + expectedCreatedMetricName = "expected_metric_created" + expectedSampleValue = 17.0 + ) + + for _, testFormat := range []config.ScrapeProtocol{config.PrometheusProto, config.OpenMetricsText1_0_0} { + t.Run(fmt.Sprintf("format=%s", testFormat), func(t *testing.T) { + for _, testWithCT := range []bool{false, true} { + t.Run(fmt.Sprintf("withCT=%v", testWithCT), func(t *testing.T) { + for _, testCTZeroIngest := range []bool{false, true} { + t.Run(fmt.Sprintf("ctZeroIngest=%v", testCTZeroIngest), func(t *testing.T) { + sampleTs := time.Now() + ctTs := time.Time{} + if testWithCT { + ctTs = sampleTs.Add(-2 * time.Minute) + } + + // TODO(bwplotka): Add more types than just counter? 
+ encoded := prepareTestEncodedCounter(t, testFormat, expectedMetricName, expectedSampleValue, sampleTs, ctTs) + app, scrapeManager := setupScrapeManager(t, true, testCTZeroIngest) + + // Perform the test. + doOneScrape(t, scrapeManager, app, setupTestServer(t, config.ScrapeProtocolsHeaders[testFormat], encoded)) + + // Verify results. + // Verify what we got vs expectations around CT injection. + samples := findSamplesForMetric(app.resultFloats, expectedMetricName) + if testWithCT && testCTZeroIngest { + require.Len(t, samples, 2) + require.Equal(t, 0.0, samples[0].f) + require.Equal(t, timestamp.FromTime(ctTs), samples[0].t) + require.Equal(t, expectedSampleValue, samples[1].f) + require.Equal(t, timestamp.FromTime(sampleTs), samples[1].t) + } else { + require.Len(t, samples, 1) + require.Equal(t, expectedSampleValue, samples[0].f) + require.Equal(t, timestamp.FromTime(sampleTs), samples[0].t) + } + + // Verify what we got vs expectations around additional _created series for OM text. + // enableCTZeroInjection also kills that _created line. + createdSeriesSamples := findSamplesForMetric(app.resultFloats, expectedCreatedMetricName) + if testFormat == config.OpenMetricsText1_0_0 && testWithCT && !testCTZeroIngest { + // For OM Text, when counter has CT, and feature flag disabled we should see _created lines. + require.Len(t, createdSeriesSamples, 1) + // Conversion taken from common/expfmt.writeOpenMetricsFloat. 
+ // We don't check the ct timestamp as explicit ts was not implemented in expfmt.Encoder, + // but exists in OM https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#:~:text=An%20example%20with%20a%20Metric%20with%20no%20labels%2C%20and%20a%20MetricPoint%20with%20a%20timestamp%20and%20a%20created + // We can implement this, but we want to potentially get rid of OM 1.0 CT lines + require.Equal(t, float64(timestamppb.New(ctTs).AsTime().UnixNano())/1e9, createdSeriesSamples[0].f) + } else { + require.Empty(t, createdSeriesSamples) + } + }) + } + }) + } }) } } +func prepareTestEncodedCounter(t *testing.T, format config.ScrapeProtocol, mName string, v float64, ts, ct time.Time) (encoded []byte) { + t.Helper() + + counter := &dto.Counter{Value: proto.Float64(v)} + if !ct.IsZero() { + counter.CreatedTimestamp = timestamppb.New(ct) + } + ctrType := dto.MetricType_COUNTER + inputMetric := &dto.MetricFamily{ + Name: proto.String(mName), + Type: &ctrType, + Metric: []*dto.Metric{{ + TimestampMs: proto.Int64(timestamp.FromTime(ts)), + Counter: counter, + }}, + } + switch format { + case config.PrometheusProto: + return protoMarshalDelimited(t, inputMetric) + case config.OpenMetricsText1_0_0: + buf := &bytes.Buffer{} + require.NoError(t, expfmt.NewEncoder(buf, expfmt.NewFormat(expfmt.TypeOpenMetrics), expfmt.WithCreatedLines(), expfmt.WithUnit()).Encode(inputMetric)) + _, _ = buf.WriteString("# EOF") + + t.Log("produced OM text to expose:", buf.String()) + return buf.Bytes() + default: + t.Fatalf("not implemented format: %v", format) + return nil + } +} + +func doOneScrape(t *testing.T, manager *Manager, appender *collectResultAppender, server *httptest.Server) { + t.Helper() + + serverURL, err := url.Parse(server.URL) + require.NoError(t, err) + + // Add fake target directly into tsets + reload + manager.updateTsets(map[string][]*targetgroup.Group{ + "test": {{ + Targets: []model.LabelSet{{ + model.SchemeLabel: 
model.LabelValue(serverURL.Scheme), + model.AddressLabel: model.LabelValue(serverURL.Host), + }}, + }}, + }) + manager.reload() + + // Wait for one scrape. + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute) + defer cancel() + require.NoError(t, runutil.Retry(100*time.Millisecond, ctx.Done(), func() error { + appender.mtx.Lock() + defer appender.mtx.Unlock() + + // Check if scrape happened and grab the relevant samples. + if len(appender.resultFloats) > 0 { + return nil + } + return fmt.Errorf("expected some float samples, got none") + }), "after 1 minute") + manager.Stop() +} + +func findSamplesForMetric(floats []floatSample, metricName string) (ret []floatSample) { + for _, f := range floats { + if f.metric.Get(model.MetricNameLabel) == metricName { + ret = append(ret, f) + } + } + return ret +} + // generateTestHistogram generates the same thing as tsdbutil.GenerateTestHistogram, // but in the form of dto.Histogram. func generateTestHistogram(i int) *dto.Histogram { diff --git a/scrape/scrape.go b/scrape/scrape.go index 071edfca5f..c66f203ddc 100644 --- a/scrape/scrape.go +++ b/scrape/scrape.go @@ -1536,7 +1536,7 @@ type appendErrors struct { } func (sl *scrapeLoop) append(app storage.Appender, b []byte, contentType string, ts time.Time) (total, added, seriesAdded int, err error) { - p, err := textparse.New(b, contentType, sl.scrapeClassicHistograms, sl.symbolTable) + p, err := textparse.New(b, contentType, sl.scrapeClassicHistograms, sl.enableCTZeroIngestion, sl.symbolTable) if err != nil { level.Debug(sl.l).Log( "msg", "Invalid content type on scrape, using prometheus parser as fallback.", diff --git a/scrape/scrape_test.go b/scrape/scrape_test.go index 04fd536012..57c51b2e92 100644 --- a/scrape/scrape_test.go +++ b/scrape/scrape_test.go @@ -1525,7 +1525,7 @@ func TestScrapeLoopAppendCacheEntryButErrNotFound(t *testing.T) { fakeRef := storage.SeriesRef(1) expValue := float64(1) metric := []byte(`metric{n="1"} 1`) - p, warning := 
textparse.New(metric, "", false, labels.NewSymbolTable()) + p, warning := textparse.New(metric, "", false, false, labels.NewSymbolTable()) require.NoError(t, warning) var lset labels.Labels From b5479831b8a5e2723759c93bf1fa305d96ed53de Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gy=C3=B6rgy=20Krajcsovits?= Date: Tue, 1 Oct 2024 15:46:49 +0200 Subject: [PATCH 111/137] Unit test for regression in rate vs float and histogram mixup MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: György Krajcsovits --- promql/engine_test.go | 115 +++++++++++++++++++++++++++++++++++++++++ storage/buffer_test.go | 50 ++++++++++++++++++ storage/series.go | 28 ++++++++++ 3 files changed, 193 insertions(+) diff --git a/promql/engine_test.go b/promql/engine_test.go index e4171eb5bd..19bd781445 100644 --- a/promql/engine_test.go +++ b/promql/engine_test.go @@ -17,6 +17,7 @@ import ( "context" "errors" "fmt" + "math" "sort" "strconv" "strings" @@ -29,11 +30,13 @@ import ( "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/timestamp" + "github.com/prometheus/prometheus/model/value" "github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/promql/parser" "github.com/prometheus/prometheus/promql/parser/posrange" "github.com/prometheus/prometheus/promql/promqltest" "github.com/prometheus/prometheus/storage" + "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/util/annotations" "github.com/prometheus/prometheus/util/stats" "github.com/prometheus/prometheus/util/teststorage" @@ -3781,3 +3784,115 @@ func TestRateAnnotations(t *testing.T) { }) } } + +func TestHistogramRateWithFloatStaleness(t *testing.T) { + // Make a chunk with two normal histograms of the same value. 
+ h1 := histogram.Histogram{ + Schema: 2, + Count: 10, + Sum: 100, + PositiveSpans: []histogram.Span{{Offset: 0, Length: 1}}, + PositiveBuckets: []int64{100}, + } + + c1 := chunkenc.NewHistogramChunk() + app, err := c1.Appender() + require.NoError(t, err) + var ( + newc chunkenc.Chunk + recoded bool + ) + + newc, recoded, app, err = app.AppendHistogram(nil, 0, h1.Copy(), false) + require.NoError(t, err) + require.False(t, recoded) + require.Nil(t, newc) + + newc, recoded, _, err = app.AppendHistogram(nil, 10, h1.Copy(), false) + require.NoError(t, err) + require.False(t, recoded) + require.Nil(t, newc) + + // Make a chunk with a single float stale marker. + c2 := chunkenc.NewXORChunk() + app, err = c2.Appender() + require.NoError(t, err) + + app.Append(20, math.Float64frombits(value.StaleNaN)) + + // Make a chunk with two normal histograms that have zero value. + h2 := histogram.Histogram{ + Schema: 2, + } + + c3 := chunkenc.NewHistogramChunk() + app, err = c3.Appender() + require.NoError(t, err) + + newc, recoded, app, err = app.AppendHistogram(nil, 30, h2.Copy(), false) + require.NoError(t, err) + require.False(t, recoded) + require.Nil(t, newc) + + newc, recoded, _, err = app.AppendHistogram(nil, 40, h2.Copy(), false) + require.NoError(t, err) + require.False(t, recoded) + require.Nil(t, newc) + + querier := storage.MockQuerier{ + SelectMockFunction: func(_ bool, _ *storage.SelectHints, _ ...*labels.Matcher) storage.SeriesSet { + return &singleSeriesSet{ + series: mockSeries{chunks: []chunkenc.Chunk{c1, c2, c3}, labelSet: []string{"__name__", "foo"}}, + } + }, + } + + queriable := storage.MockQueryable{MockQuerier: &querier} + + engine := promqltest.NewTestEngine(t, false, 0, promqltest.DefaultMaxSamplesPerQuery) + + q, err := engine.NewInstantQuery(context.Background(), &queriable, nil, "rate(foo[40s])", timestamp.Time(45)) + require.NoError(t, err) + defer q.Close() + + res := q.Exec(context.Background()) + require.NoError(t, res.Err) + + vec, err := 
res.Vector() + require.NoError(t, err) + + // Single sample result. + require.Len(t, vec, 1) + // The result is a histogram. + require.NotNil(t, vec[0].H) + // The result should be zero as the histogram has not increased, so the rate is zero. + require.Equal(t, 0.0, vec[0].H.Count) + require.Equal(t, 0.0, vec[0].H.Sum) +} + +type singleSeriesSet struct { + series storage.Series + consumed bool +} + +func (s *singleSeriesSet) Next() bool { c := s.consumed; s.consumed = true; return !c } +func (s singleSeriesSet) At() storage.Series { return s.series } +func (s singleSeriesSet) Err() error { return nil } +func (s singleSeriesSet) Warnings() annotations.Annotations { return nil } + +type mockSeries struct { + chunks []chunkenc.Chunk + labelSet []string +} + +func (s mockSeries) Labels() labels.Labels { + return labels.FromStrings(s.labelSet...) +} + +func (s mockSeries) Iterator(it chunkenc.Iterator) chunkenc.Iterator { + iterables := []chunkenc.Iterator{} + for _, c := range s.chunks { + iterables = append(iterables, c.Iterator(nil)) + } + return storage.ChainSampleIteratorFromIterators(it, iterables) +} diff --git a/storage/buffer_test.go b/storage/buffer_test.go index b5c6443ac5..6e8e83db8f 100644 --- a/storage/buffer_test.go +++ b/storage/buffer_test.go @@ -314,6 +314,56 @@ func TestBufferedSeriesIteratorMixedHistograms(t *testing.T) { require.Equal(t, histograms[1].ToFloat(nil), fh) } +func TestBufferedSeriesIteratorMixedFloatsAndHistograms(t *testing.T) { + histograms := tsdbutil.GenerateTestHistograms(5) + + it := NewBufferIterator(NewListSeriesIteratorWithCopy(samples{ + hSample{t: 1, h: histograms[0].Copy()}, + fSample{t: 2, f: 2}, + hSample{t: 3, h: histograms[1].Copy()}, + hSample{t: 4, h: histograms[2].Copy()}, + fhSample{t: 3, fh: histograms[3].ToFloat(nil)}, + fhSample{t: 4, fh: histograms[4].ToFloat(nil)}, + }), 6) + + require.Equal(t, chunkenc.ValNone, it.Seek(7)) + require.NoError(t, it.Err()) + + buf := it.Buffer() + + require.Equal(t, 
chunkenc.ValHistogram, buf.Next()) + _, h0 := buf.AtHistogram() + require.Equal(t, histograms[0], h0) + + require.Equal(t, chunkenc.ValFloat, buf.Next()) + _, v := buf.At() + require.Equal(t, 2.0, v) + + require.Equal(t, chunkenc.ValHistogram, buf.Next()) + _, h1 := buf.AtHistogram() + require.Equal(t, histograms[1], h1) + + require.Equal(t, chunkenc.ValHistogram, buf.Next()) + _, h2 := buf.AtHistogram() + require.Equal(t, histograms[2], h2) + + require.Equal(t, chunkenc.ValFloatHistogram, buf.Next()) + _, h3 := buf.AtFloatHistogram(nil) + require.Equal(t, histograms[3].ToFloat(nil), h3) + + require.Equal(t, chunkenc.ValFloatHistogram, buf.Next()) + _, h4 := buf.AtFloatHistogram(nil) + require.Equal(t, histograms[4].ToFloat(nil), h4) + + // Test for overwrite bug where the buffered histogram was reused + // between items in the buffer. + require.Equal(t, histograms[0], h0) + require.Equal(t, histograms[1], h1) + require.Equal(t, histograms[2], h2) + require.Equal(t, histograms[3].ToFloat(nil), h3) + require.Equal(t, histograms[4].ToFloat(nil), h4) +} + func BenchmarkBufferedSeriesIterator(b *testing.B) { // Simulate a 5 minute rate. 
it := NewBufferIterator(newFakeSeriesIterator(int64(b.N), 30), 5*60) diff --git a/storage/series.go b/storage/series.go index 70e3d0a199..a3dbec7088 100644 --- a/storage/series.go +++ b/storage/series.go @@ -171,6 +171,34 @@ func (it *listSeriesIterator) Seek(t int64) chunkenc.ValueType { func (it *listSeriesIterator) Err() error { return nil } +type listSeriesIteratorWithCopy struct { + *listSeriesIterator +} + +func NewListSeriesIteratorWithCopy(samples Samples) chunkenc.Iterator { + return &listSeriesIteratorWithCopy{ + listSeriesIterator: &listSeriesIterator{samples: samples, idx: -1}, + } +} + +func (it *listSeriesIteratorWithCopy) AtHistogram(h *histogram.Histogram) (int64, *histogram.Histogram) { + t, ih := it.listSeriesIterator.AtHistogram(nil) + if h == nil || ih == nil { + return t, ih + } + ih.CopyTo(h) + return t, h +} + +func (it *listSeriesIteratorWithCopy) AtFloatHistogram(fh *histogram.FloatHistogram) (int64, *histogram.FloatHistogram) { + t, ih := it.listSeriesIterator.AtFloatHistogram(nil) + if fh == nil || ih == nil { + return t, ih + } + ih.CopyTo(fh) + return t, fh +} + type listChunkSeriesIterator struct { chks []chunks.Meta idx int From 44ebbb8458adb429c135ea31a29a1852b69d65a7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gy=C3=B6rgy=20Krajcsovits?= Date: Tue, 1 Oct 2024 17:19:13 +0200 Subject: [PATCH 112/137] Fix missing histogram copy in sampleRing MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The specialized version of sample add to the ring: func addH(s hSample, buf []hSample, r *sampleRing) []hSample func addFH(s fhSample, buf []fhSample, r *sampleRing) []fhSample already correctly copy histogram samples from the reused hReader, fhReader buffers, but the generic version does not. This means that the data is overwritten on the next read if the sample ring has seen histogram and float samples at the same time and switched to generic mode. 
The `genericAdd` function (which was commented anyway) is by now quite different from the specialized functions so that this commit deletes it. Signed-off-by: György Krajcsovits --- storage/buffer.go | 71 +++++++++++------------------------------- tsdb/chunks/samples.go | 12 +++++++ tsdb/head.go | 11 +++++++ 3 files changed, 41 insertions(+), 53 deletions(-) diff --git a/storage/buffer.go b/storage/buffer.go index 9f31fb53fc..ad504ad5db 100644 --- a/storage/buffer.go +++ b/storage/buffer.go @@ -187,6 +187,10 @@ func (s fSample) Type() chunkenc.ValueType { return chunkenc.ValFloat } +func (s fSample) Copy() chunks.Sample { + return s +} + type hSample struct { t int64 h *histogram.Histogram @@ -212,6 +216,10 @@ func (s hSample) Type() chunkenc.ValueType { return chunkenc.ValHistogram } +func (s hSample) Copy() chunks.Sample { + return hSample{t: s.t, h: s.h.Copy()} +} + type fhSample struct { t int64 fh *histogram.FloatHistogram @@ -237,6 +245,10 @@ func (s fhSample) Type() chunkenc.ValueType { return chunkenc.ValFloatHistogram } +func (s fhSample) Copy() chunks.Sample { + return fhSample{t: s.t, fh: s.fh.Copy()} +} + type sampleRing struct { delta int64 @@ -535,55 +547,8 @@ func (r *sampleRing) addFH(s fhSample) { } } -// genericAdd is a generic implementation of adding a chunks.Sample -// implementation to a buffer of a sample ring. However, the Go compiler -// currently (go1.20) decides to not expand the code during compile time, but -// creates dynamic code to handle the different types. That has a significant -// overhead during runtime, noticeable in PromQL benchmarks. For example, the -// "RangeQuery/expr=rate(a_hundred[1d]),steps=.*" benchmarks show about 7% -// longer runtime, 9% higher allocation size, and 10% more allocations. -// Therefore, genericAdd has been manually implemented for all the types -// (addSample, addF, addH, addFH) below. 
-// -// func genericAdd[T chunks.Sample](s T, buf []T, r *sampleRing) []T { -// l := len(buf) -// // Grow the ring buffer if it fits no more elements. -// if l == 0 { -// buf = make([]T, 16) -// l = 16 -// } -// if l == r.l { -// newBuf := make([]T, 2*l) -// copy(newBuf[l+r.f:], buf[r.f:]) -// copy(newBuf, buf[:r.f]) -// -// buf = newBuf -// r.i = r.f -// r.f += l -// l = 2 * l -// } else { -// r.i++ -// if r.i >= l { -// r.i -= l -// } -// } -// -// buf[r.i] = s -// r.l++ -// -// // Free head of the buffer of samples that just fell out of the range. -// tmin := s.T() - r.delta -// for buf[r.f].T() < tmin { -// r.f++ -// if r.f >= l { -// r.f -= l -// } -// r.l-- -// } -// return buf -// } - -// addSample is a handcoded specialization of genericAdd (see above). +// addSample adds a sample to a buffer of chunks.Sample, i.e. the general case +// using an interface as the type. func addSample(s chunks.Sample, buf []chunks.Sample, r *sampleRing) []chunks.Sample { l := len(buf) // Grow the ring buffer if it fits no more elements. @@ -607,7 +572,7 @@ func addSample(s chunks.Sample, buf []chunks.Sample, r *sampleRing) []chunks.Sam } } - buf[r.i] = s + buf[r.i] = s.Copy() r.l++ // Free head of the buffer of samples that just fell out of the range. @@ -622,7 +587,7 @@ func addSample(s chunks.Sample, buf []chunks.Sample, r *sampleRing) []chunks.Sam return buf } -// addF is a handcoded specialization of genericAdd (see above). +// addF adds an fSample to a (specialized) fSample buffer. func addF(s fSample, buf []fSample, r *sampleRing) []fSample { l := len(buf) // Grow the ring buffer if it fits no more elements. @@ -661,7 +626,7 @@ func addF(s fSample, buf []fSample, r *sampleRing) []fSample { return buf } -// addH is a handcoded specialization of genericAdd (see above). +// addF adds an hSample to a (specialized) hSample buffer. func addH(s hSample, buf []hSample, r *sampleRing) []hSample { l := len(buf) // Grow the ring buffer if it fits no more elements. 
@@ -705,7 +670,7 @@ func addH(s hSample, buf []hSample, r *sampleRing) []hSample { return buf } -// addFH is a handcoded specialization of genericAdd (see above). +// addFH adds an fhSample to a (specialized) fhSample buffer. func addFH(s fhSample, buf []fhSample, r *sampleRing) []fhSample { l := len(buf) // Grow the ring buffer if it fits no more elements. diff --git a/tsdb/chunks/samples.go b/tsdb/chunks/samples.go index 638660c70c..a5b16094df 100644 --- a/tsdb/chunks/samples.go +++ b/tsdb/chunks/samples.go @@ -29,6 +29,7 @@ type Sample interface { H() *histogram.Histogram FH() *histogram.FloatHistogram Type() chunkenc.ValueType + Copy() Sample // Returns a deep copy. } type SampleSlice []Sample @@ -70,6 +71,17 @@ func (s sample) Type() chunkenc.ValueType { } } +func (s sample) Copy() Sample { + c := sample{t: s.t, f: s.f} + if s.h != nil { + c.h = s.h.Copy() + } + if s.fh != nil { + c.fh = s.fh.Copy() + } + return c +} + // GenerateSamples starting at start and counting up numSamples. func GenerateSamples(start, numSamples int) []Sample { return generateSamples(start, numSamples, func(i int) Sample { diff --git a/tsdb/head.go b/tsdb/head.go index af16fbf37c..f469e5e345 100644 --- a/tsdb/head.go +++ b/tsdb/head.go @@ -2081,6 +2081,17 @@ func (s sample) Type() chunkenc.ValueType { } } +func (s sample) Copy() chunks.Sample { + c := sample{t: s.t, f: s.f} + if s.h != nil { + c.h = s.h.Copy() + } + if s.fh != nil { + c.fh = s.fh.Copy() + } + return c +} + // memSeries is the in-memory representation of a series. None of its methods // are goroutine safe and it is the caller's responsibility to lock it. 
type memSeries struct { From 06945b39331a471b2b6b14e0f38a31c6720e3146 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jorge=20Alberto=20D=C3=ADaz=20Orozco?= Date: Wed, 2 Oct 2024 13:37:10 +0200 Subject: [PATCH 113/137] Add a mutex and used ports list to the random port generator MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit to avoid port collisions Signed-off-by: Jorge Alberto Díaz Orozco --- util/testutil/port.go | 41 ++++++++++++++++++++++++++++++++++++++--- 1 file changed, 38 insertions(+), 3 deletions(-) diff --git a/util/testutil/port.go b/util/testutil/port.go index 1e449b123d..7cf4cf1ccc 100644 --- a/util/testutil/port.go +++ b/util/testutil/port.go @@ -15,21 +15,56 @@ package testutil import ( "net" + "sync" "testing" ) +var ( + mu sync.Mutex + usedPorts []int +) + // RandomUnprivilegedPort returns valid unprivileged random port number which can be used for testing. func RandomUnprivilegedPort(t *testing.T) int { t.Helper() + mu.Lock() + defer mu.Unlock() + port, err := getPort() + if err != nil { + t.Fatal(err) + } + + for portWasUsed(port) { + port, err = getPort() + if err != nil { + t.Fatal(err) + } + } + + usedPorts = append(usedPorts, port) + + return port +} + +func portWasUsed(port int) bool { + for _, usedPort := range usedPorts { + if port == usedPort { + return true + } + } + return false +} + +func getPort() (int, error) { listener, err := net.Listen("tcp", ":0") if err != nil { - t.Fatalf("Listening on random port: %v", err) + return 0, err } if err := listener.Close(); err != nil { - t.Fatalf("Closing listener: %v", err) + return 0, err } - return listener.Addr().(*net.TCPAddr).Port + return listener.Addr().(*net.TCPAddr).Port, nil } From b6158e8956110b24e8b39072dd6d9c9d0fd649bb Mon Sep 17 00:00:00 2001 From: Julien Date: Thu, 3 Oct 2024 10:26:05 +0200 Subject: [PATCH 114/137] Notify web UI when starting up and shutting down Signed-off-by: Julien --- cmd/prometheus/main.go | 3 +++ web/api/notifications.go | 
2 ++ 2 files changed, 5 insertions(+) diff --git a/cmd/prometheus/main.go b/cmd/prometheus/main.go index d8369770bc..8ad1db6378 100644 --- a/cmd/prometheus/main.go +++ b/cmd/prometheus/main.go @@ -503,6 +503,7 @@ func main() { notifs := api.NewNotifications(cfg.maxNotificationsSubscribers, prometheus.DefaultRegisterer) cfg.web.NotificationsSub = notifs.Sub cfg.web.NotificationsGetter = notifs.Get + notifs.AddNotification(api.StartingUp) if err := cfg.setFeatureListOptions(logger); err != nil { fmt.Fprintln(os.Stderr, fmt.Errorf("Error parsing feature list: %w", err)) @@ -989,6 +990,7 @@ func main() { func(err error) { close(cancel) webHandler.SetReady(web.Stopping) + notifs.AddNotification(api.ShuttingDown) }, ) } @@ -1174,6 +1176,7 @@ func main() { reloadReady.Close() webHandler.SetReady(web.Ready) + notifs.DeleteNotification(api.StartingUp) level.Info(logger).Log("msg", "Server is ready to receive web requests.") <-cancel return nil diff --git a/web/api/notifications.go b/web/api/notifications.go index 976f0b0768..a838fbd989 100644 --- a/web/api/notifications.go +++ b/web/api/notifications.go @@ -22,6 +22,8 @@ import ( const ( ConfigurationUnsuccessful = "Configuration reload has failed." + StartingUp = "Prometheus is starting and replaying the write-ahead log (WAL)." + ShuttingDown = "Prometheus is shutting down and gracefully stopping all operations." ) // Notification represents an individual notification message. From 1f40859f9e599de5f96e50a55b0eed614b7583a5 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 4 Oct 2024 07:44:20 +0000 Subject: [PATCH 115/137] Bump google.golang.org/api from 0.195.0 to 0.199.0 Bumps [google.golang.org/api](https://github.com/googleapis/google-api-go-client) from 0.195.0 to 0.199.0. 
- [Release notes](https://github.com/googleapis/google-api-go-client/releases) - [Changelog](https://github.com/googleapis/google-api-go-client/blob/main/CHANGES.md) - [Commits](https://github.com/googleapis/google-api-go-client/compare/v0.195.0...v0.199.0) --- updated-dependencies: - dependency-name: google.golang.org/api dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- go.mod | 12 ++++++------ go.sum | 24 ++++++++++++------------ 2 files changed, 18 insertions(+), 18 deletions(-) diff --git a/go.mod b/go.mod index 4d0c98719b..eb125a66f1 100644 --- a/go.mod +++ b/go.mod @@ -81,9 +81,9 @@ require ( golang.org/x/text v0.18.0 golang.org/x/time v0.6.0 golang.org/x/tools v0.24.0 - google.golang.org/api v0.195.0 + google.golang.org/api v0.199.0 google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 - google.golang.org/grpc v1.66.2 + google.golang.org/grpc v1.67.0 google.golang.org/protobuf v1.34.2 gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.1 @@ -95,9 +95,9 @@ require ( ) require ( - cloud.google.com/go/auth v0.9.3 // indirect + cloud.google.com/go/auth v0.9.5 // indirect cloud.google.com/go/auth/oauth2adapt v0.2.4 // indirect - cloud.google.com/go/compute/metadata v0.5.0 // indirect + cloud.google.com/go/compute/metadata v0.5.2 // indirect github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect github.com/Microsoft/go-winio v0.6.1 // indirect @@ -106,7 +106,7 @@ require ( github.com/beorn7/perks v1.0.1 // indirect github.com/cenkalti/backoff/v4 v4.3.0 // indirect github.com/cilium/ebpf v0.11.0 // indirect - github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b // indirect + github.com/cncf/xds/go v0.0.0-20240723142845-024c85f92f20 // indirect github.com/containerd/cgroups/v3 v3.0.3 // indirect github.com/containerd/log v0.1.0 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect 
@@ -133,7 +133,7 @@ require ( github.com/go-resty/resty/v2 v2.13.1 // indirect github.com/godbus/dbus/v5 v5.0.4 // indirect github.com/golang-jwt/jwt/v5 v5.2.1 // indirect - github.com/golang/glog v1.2.1 // indirect + github.com/golang/glog v1.2.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.4 // indirect github.com/google/gnostic-models v0.6.8 // indirect diff --git a/go.sum b/go.sum index 73dafaa104..423973d8e8 100644 --- a/go.sum +++ b/go.sum @@ -12,8 +12,8 @@ cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bP cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go/auth v0.9.3 h1:VOEUIAADkkLtyfr3BLa3R8Ed/j6w1jTBmARx+wb5w5U= -cloud.google.com/go/auth v0.9.3/go.mod h1:7z6VY+7h3KUdRov5F1i8NDP5ZzWKYmEPO842BgCsmTk= +cloud.google.com/go/auth v0.9.5 h1:4CTn43Eynw40aFVr3GpPqsQponx2jv0BQpjvajsbbzw= +cloud.google.com/go/auth v0.9.5/go.mod h1:Xo0n7n66eHyOWWCnitop6870Ilwo3PiZyodVkkH1xWM= cloud.google.com/go/auth/oauth2adapt v0.2.4 h1:0GWE/FUsXhf6C+jAkWgYm7X9tK8cuEIfy19DBn6B6bY= cloud.google.com/go/auth/oauth2adapt v0.2.4/go.mod h1:jC/jOpwFP6JBxhB3P5Rr0a9HLMC/Pe3eaL4NmdvqPtc= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= @@ -22,8 +22,8 @@ cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvf cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/compute/metadata v0.5.0 h1:Zr0eK8JbFv6+Wi4ilXAR8FJ3wyNdpxHKJNPos6LTZOY= -cloud.google.com/go/compute/metadata 
v0.5.0/go.mod h1:aHnloV2TPI38yx4s9+wAZhHykWvVCfu7hQbF+9CWoiY= +cloud.google.com/go/compute/metadata v0.5.2 h1:UxK4uu/Tn+I3p2dYWTfiX4wva7aYlKixAHn3fyqngqo= +cloud.google.com/go/compute/metadata v0.5.2/go.mod h1:C66sj2AluDcIqakBq/M8lw8/ybHgOZqin2obFxa/E5k= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= @@ -120,8 +120,8 @@ github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b h1:ga8SEFjZ60pxLcmhnThWgvH2wg8376yUJmPhEH4H3kw= -github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= +github.com/cncf/xds/go v0.0.0-20240723142845-024c85f92f20 h1:N+3sFI5GUjRKBi+i0TxYVST9h4Ie192jJWpHvthBBgg= +github.com/cncf/xds/go v0.0.0-20240723142845-024c85f92f20/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= github.com/containerd/cgroups/v3 v3.0.3 h1:S5ByHZ/h9PMe5IOQoN7E+nMc2UcLEM/V48DGDJ9kip0= @@ -251,8 +251,8 @@ github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69 github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk= github.com/golang-jwt/jwt/v5 v5.2.1/go.mod 
h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.2.1 h1:OptwRhECazUx5ix5TTWC3EZhsZEHWcYWY4FQHTIubm4= -github.com/golang/glog v1.2.1/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= +github.com/golang/glog v1.2.2 h1:1+mZ9upx1Dh6FmUTFR1naJ77miKiXgALjWOZ3NVFPmY= +github.com/golang/glog v1.2.2/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -1056,8 +1056,8 @@ google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/ google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.195.0 h1:Ude4N8FvTKnnQJHU48RFI40jOBgIrL8Zqr3/QeST6yU= -google.golang.org/api v0.195.0/go.mod h1:DOGRWuv3P8TU8Lnz7uQc4hyNqrBpMtD9ppW3wBJurgc= +google.golang.org/api v0.199.0 h1:aWUXClp+VFJmqE0JPvpZOK3LDQMyFKYIow4etYd9qxs= +google.golang.org/api v0.199.0/go.mod h1:ohG4qSztDJmZdjK/Ar6MhbAmb/Rpi4JHOqagsh90K28= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -1116,8 +1116,8 @@ google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3Iji google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= 
google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.66.2 h1:3QdXkuq3Bkh7w+ywLdLvM56cmGvQHUMZpiCzt6Rqaoo= -google.golang.org/grpc v1.66.2/go.mod h1:s3/l6xSSCURdVfAnL+TqCNMyTDAGN6+lZeVxnZR128Y= +google.golang.org/grpc v1.67.0 h1:IdH9y6PF5MPSdAntIcpjQ+tXO41pcQsfZV2RxtQgVcw= +google.golang.org/grpc v1.67.0/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= From 52cc937af0b784f4d32439c69ed336c02e423019 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 4 Oct 2024 07:46:29 +0000 Subject: [PATCH 116/137] Bump go.uber.org/automaxprocs from 1.5.3 to 1.6.0 Bumps [go.uber.org/automaxprocs](https://github.com/uber-go/automaxprocs) from 1.5.3 to 1.6.0. - [Release notes](https://github.com/uber-go/automaxprocs/releases) - [Changelog](https://github.com/uber-go/automaxprocs/blob/master/CHANGELOG.md) - [Commits](https://github.com/uber-go/automaxprocs/compare/v1.5.3...v1.6.0) --- updated-dependencies: - dependency-name: go.uber.org/automaxprocs dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 4d0c98719b..cb8d8a7ff2 100644 --- a/go.mod +++ b/go.mod @@ -72,7 +72,7 @@ require ( go.opentelemetry.io/otel/sdk v1.30.0 go.opentelemetry.io/otel/trace v1.30.0 go.uber.org/atomic v1.11.0 - go.uber.org/automaxprocs v1.5.3 + go.uber.org/automaxprocs v1.6.0 go.uber.org/goleak v1.3.0 go.uber.org/multierr v1.11.0 golang.org/x/oauth2 v0.23.0 diff --git a/go.sum b/go.sum index 73dafaa104..b5d30ca42b 100644 --- a/go.sum +++ b/go.sum @@ -758,8 +758,8 @@ go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= -go.uber.org/automaxprocs v1.5.3 h1:kWazyxZUrS3Gs4qUpbwo5kEIMGe/DAvi5Z4tl2NW4j8= -go.uber.org/automaxprocs v1.5.3/go.mod h1:eRbA25aqJrxAbsLO0xy5jVwPt7FQnRgjW+efnwa1WM0= +go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs= +go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= From 21e0f83b68789f281a5c1639ccd5b30a486d7fc8 Mon Sep 17 00:00:00 2001 From: Julien Date: Fri, 4 Oct 2024 10:11:56 +0200 Subject: [PATCH 117/137] Move notifications in utils Signed-off-by: Julien --- cmd/prometheus/main.go | 14 +++++++------- {web/api => util/notifications}/notifications.go | 2 +- .../notifications}/notifications_test.go | 2 +- web/api/v1/api.go | 10 +++++----- web/web.go | 6 +++--- 5 files changed, 17 insertions(+), 17 deletions(-) rename {web/api => 
util/notifications}/notifications.go (99%) rename {web/api => util/notifications}/notifications_test.go (99%) diff --git a/cmd/prometheus/main.go b/cmd/prometheus/main.go index 8ad1db6378..11d8caae6f 100644 --- a/cmd/prometheus/main.go +++ b/cmd/prometheus/main.go @@ -76,9 +76,9 @@ import ( "github.com/prometheus/prometheus/tsdb/wlog" "github.com/prometheus/prometheus/util/documentcli" "github.com/prometheus/prometheus/util/logging" + "github.com/prometheus/prometheus/util/notifications" prom_runtime "github.com/prometheus/prometheus/util/runtime" "github.com/prometheus/prometheus/web" - "github.com/prometheus/prometheus/web/api" ) var ( @@ -500,10 +500,10 @@ func main() { logger := promlog.New(&cfg.promlogConfig) - notifs := api.NewNotifications(cfg.maxNotificationsSubscribers, prometheus.DefaultRegisterer) + notifs := notifications.NewNotifications(cfg.maxNotificationsSubscribers, prometheus.DefaultRegisterer) cfg.web.NotificationsSub = notifs.Sub cfg.web.NotificationsGetter = notifs.Get - notifs.AddNotification(api.StartingUp) + notifs.AddNotification(notifications.StartingUp) if err := cfg.setFeatureListOptions(logger); err != nil { fmt.Fprintln(os.Stderr, fmt.Errorf("Error parsing feature list: %w", err)) @@ -990,7 +990,7 @@ func main() { func(err error) { close(cancel) webHandler.SetReady(web.Stopping) - notifs.AddNotification(api.ShuttingDown) + notifs.AddNotification(notifications.ShuttingDown) }, ) } @@ -1091,10 +1091,10 @@ func main() { callback := func(success bool) { if success { - notifs.DeleteNotification(api.ConfigurationUnsuccessful) + notifs.DeleteNotification(notifications.ConfigurationUnsuccessful) return } - notifs.AddNotification(api.ConfigurationUnsuccessful) + notifs.AddNotification(notifications.ConfigurationUnsuccessful) } g.Add( @@ -1176,7 +1176,7 @@ func main() { reloadReady.Close() webHandler.SetReady(web.Ready) - notifs.DeleteNotification(api.StartingUp) + notifs.DeleteNotification(notifications.StartingUp) level.Info(logger).Log("msg", 
"Server is ready to receive web requests.") <-cancel return nil diff --git a/web/api/notifications.go b/util/notifications/notifications.go similarity index 99% rename from web/api/notifications.go rename to util/notifications/notifications.go index a838fbd989..4888a0b664 100644 --- a/web/api/notifications.go +++ b/util/notifications/notifications.go @@ -11,7 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package api +package notifications import ( "sync" diff --git a/web/api/notifications_test.go b/util/notifications/notifications_test.go similarity index 99% rename from web/api/notifications_test.go rename to util/notifications/notifications_test.go index 437ff1ec4b..e487e9ce54 100644 --- a/web/api/notifications_test.go +++ b/util/notifications/notifications_test.go @@ -11,7 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package api +package notifications import ( "sync" diff --git a/web/api/v1/api.go b/web/api/v1/api.go index 46666af90c..95ab7ea2ac 100644 --- a/web/api/v1/api.go +++ b/web/api/v1/api.go @@ -54,8 +54,8 @@ import ( "github.com/prometheus/prometheus/tsdb/index" "github.com/prometheus/prometheus/util/annotations" "github.com/prometheus/prometheus/util/httputil" + "github.com/prometheus/prometheus/util/notifications" "github.com/prometheus/prometheus/util/stats" - "github.com/prometheus/prometheus/web/api" ) type status string @@ -214,8 +214,8 @@ type API struct { gatherer prometheus.Gatherer isAgent bool statsRenderer StatsRenderer - notificationsGetter func() []api.Notification - notificationsSub func() (<-chan api.Notification, func(), bool) + notificationsGetter func() []notifications.Notification + notificationsSub func() (<-chan notifications.Notification, func(), bool) remoteWriteHandler http.Handler remoteReadHandler http.Handler @@ -249,8 +249,8 @@ func NewAPI( corsOrigin *regexp.Regexp, runtimeInfo func() 
(RuntimeInfo, error), buildInfo *PrometheusVersion, - notificationsGetter func() []api.Notification, - notificationsSub func() (<-chan api.Notification, func(), bool), + notificationsGetter func() []notifications.Notification, + notificationsSub func() (<-chan notifications.Notification, func(), bool), gatherer prometheus.Gatherer, registerer prometheus.Registerer, statsRenderer StatsRenderer, diff --git a/web/web.go b/web/web.go index 724ca91051..5e1d3d230b 100644 --- a/web/web.go +++ b/web/web.go @@ -59,7 +59,7 @@ import ( "github.com/prometheus/prometheus/template" "github.com/prometheus/prometheus/util/httputil" "github.com/prometheus/prometheus/util/netconnlimit" - "github.com/prometheus/prometheus/web/api" + "github.com/prometheus/prometheus/util/notifications" api_v1 "github.com/prometheus/prometheus/web/api/v1" "github.com/prometheus/prometheus/web/ui" ) @@ -267,8 +267,8 @@ type Options struct { RuleManager *rules.Manager Notifier *notifier.Manager Version *PrometheusVersion - NotificationsGetter func() []api.Notification - NotificationsSub func() (<-chan api.Notification, func(), bool) + NotificationsGetter func() []notifications.Notification + NotificationsSub func() (<-chan notifications.Notification, func(), bool) Flags map[string]string ListenAddresses []string From 9d275c23de3c1d2c1e8c40bfb2f21eb86394d026 Mon Sep 17 00:00:00 2001 From: Julien Date: Fri, 4 Oct 2024 11:17:59 +0200 Subject: [PATCH 118/137] cmd/prometheus: Fix flakiness of QueryLogTest Now we check that a rule execution has taken place. This also reduces the time to run the rules tests from 45s to 25s. 
Signed-off-by: Julien --- cmd/prometheus/query_log_test.go | 55 ++++++++++++++++++++++++++++++-- 1 file changed, 52 insertions(+), 3 deletions(-) diff --git a/cmd/prometheus/query_log_test.go b/cmd/prometheus/query_log_test.go index 62e317bf8b..3b00230cd9 100644 --- a/cmd/prometheus/query_log_test.go +++ b/cmd/prometheus/query_log_test.go @@ -125,12 +125,61 @@ func (p *queryLogTest) query(t *testing.T) { require.NoError(t, err) require.Equal(t, 200, r.StatusCode) case ruleOrigin: - time.Sleep(2 * time.Second) + // Poll the /api/v1/rules endpoint until a new rule evaluation is detected. + var lastEvalTime time.Time + for { + r, err := http.Get(fmt.Sprintf("http://%s:%d/api/v1/rules", p.host, p.port)) + require.NoError(t, err) + + rulesBody, err := io.ReadAll(r.Body) + require.NoError(t, err) + defer r.Body.Close() + + // Parse the rules response to find the last evaluation time. + newEvalTime := parseLastEvaluation(rulesBody) + if newEvalTime.After(lastEvalTime) { + if !lastEvalTime.IsZero() { + break + } + lastEvalTime = newEvalTime + } + + time.Sleep(100 * time.Millisecond) + } default: panic("can't query this origin") } } +// parseLastEvaluation extracts the last evaluation timestamp from the /api/v1/rules response. +func parseLastEvaluation(rulesBody []byte) time.Time { + var ruleResponse struct { + Status string `json:"status"` + Data struct { + Groups []struct { + Rules []struct { + LastEvaluation string `json:"lastEvaluation"` + } `json:"rules"` + } `json:"groups"` + } `json:"data"` + } + + err := json.Unmarshal(rulesBody, &ruleResponse) + if err != nil { + return time.Time{} + } + + for _, group := range ruleResponse.Data.Groups { + for _, rule := range group.Rules { + if evalTime, err := time.Parse(time.RFC3339Nano, rule.LastEvaluation); err == nil { + return evalTime + } + } + } + + return time.Time{} +} + // queryString returns the expected queryString of a this test. 
func (p *queryLogTest) queryString() string { switch p.origin { @@ -322,7 +371,7 @@ func (p *queryLogTest) run(t *testing.T) { if p.exactQueryCount() { require.Len(t, ql, qc) } else { - require.Greater(t, len(ql), qc, "no queries logged") + require.GreaterOrEqual(t, len(ql), qc, "no queries logged") } p.validateLastQuery(t, ql) qc = len(ql) @@ -353,7 +402,7 @@ func (p *queryLogTest) run(t *testing.T) { if p.exactQueryCount() { require.Len(t, ql, qc) } else { - require.Greater(t, len(ql), qc, "no queries logged") + require.GreaterOrEqual(t, len(ql), qc, "no queries logged") } p.validateLastQuery(t, ql) From 5c5cb0e7121f605c3060ce01eca940e83f1d7e8d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 4 Oct 2024 09:25:04 +0000 Subject: [PATCH 119/137] Bump golang.org/x/tools from 0.24.0 to 0.25.0 Bumps [golang.org/x/tools](https://github.com/golang/tools) from 0.24.0 to 0.25.0. - [Release notes](https://github.com/golang/tools/releases) - [Commits](https://github.com/golang/tools/compare/v0.24.0...v0.25.0) --- updated-dependencies: - dependency-name: golang.org/x/tools dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- go.mod | 4 ++-- go.sum | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index eb125a66f1..53e77813e1 100644 --- a/go.mod +++ b/go.mod @@ -80,7 +80,7 @@ require ( golang.org/x/sys v0.25.0 golang.org/x/text v0.18.0 golang.org/x/time v0.6.0 - golang.org/x/tools v0.24.0 + golang.org/x/tools v0.25.0 google.golang.org/api v0.199.0 google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 google.golang.org/grpc v1.67.0 @@ -192,7 +192,7 @@ require ( go.opentelemetry.io/proto/otlp v1.3.1 // indirect golang.org/x/crypto v0.27.0 // indirect golang.org/x/exp v0.0.0-20240119083558-1b970713d09a // indirect - golang.org/x/mod v0.20.0 // indirect + golang.org/x/mod v0.21.0 // indirect golang.org/x/net v0.29.0 // indirect golang.org/x/term v0.24.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect diff --git a/go.sum b/go.sum index 423973d8e8..9dbe1cb566 100644 --- a/go.sum +++ b/go.sum @@ -818,8 +818,8 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.20.0 h1:utOm6MM3R3dnawAiJgn0y+xvuYRsm1RKM/4giyfDgV0= -golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0= +golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1035,8 +1035,8 @@ golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24= -golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ= +golang.org/x/tools v0.25.0 h1:oFU9pkj/iJgs+0DT+VMHrx+oBKs/LJMV+Uvg78sl+fE= +golang.org/x/tools v0.25.0/go.mod h1:/vtpO8WL1N9cQC3FN5zPqb//fRXskFHbLKk4OW1Q7rg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= From 92be29d0dc96f3d1aa6291b97708672b3a0cb7b3 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 4 Oct 2024 09:25:05 +0000 Subject: [PATCH 120/137] Bump google.golang.org/grpc from 1.66.0 to 1.67.1 Bumps [google.golang.org/grpc](https://github.com/grpc/grpc-go) from 1.66.0 to 1.67.1. - [Release notes](https://github.com/grpc/grpc-go/releases) - [Commits](https://github.com/grpc/grpc-go/compare/v1.66.0...v1.67.1) --- updated-dependencies: - dependency-name: google.golang.org/grpc dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index eb125a66f1..5cf9d44c59 100644 --- a/go.mod +++ b/go.mod @@ -83,7 +83,7 @@ require ( golang.org/x/tools v0.24.0 google.golang.org/api v0.199.0 google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 - google.golang.org/grpc v1.67.0 + google.golang.org/grpc v1.67.1 google.golang.org/protobuf v1.34.2 gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.1 diff --git a/go.sum b/go.sum index 423973d8e8..b13cdc67bb 100644 --- a/go.sum +++ b/go.sum @@ -1116,8 +1116,8 @@ google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3Iji google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.67.0 h1:IdH9y6PF5MPSdAntIcpjQ+tXO41pcQsfZV2RxtQgVcw= -google.golang.org/grpc v1.67.0/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= +google.golang.org/grpc v1.67.1 h1:zWnc1Vrcno+lHZCOofnIMvycFcc0QRGIzm9dhnDX68E= +google.golang.org/grpc v1.67.1/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= From 563bfdd384bb9ae0b4e2a76a3b87c46ba3faa25d Mon Sep 17 00:00:00 2001 From: Jan Fajerski Date: Fri, 4 Oct 2024 12:01:54 +0200 Subject: [PATCH 121/137] storage: require selectors to always return matching results Signed-off-by: Jan Fajerski --- docs/querying/remote_read_api.md | 3 ++- storage/interface.go | 4 ++++ 2 files changed, 6 
insertions(+), 1 deletion(-) diff --git a/docs/querying/remote_read_api.md b/docs/querying/remote_read_api.md index efbd08e984..76de112342 100644 --- a/docs/querying/remote_read_api.md +++ b/docs/querying/remote_read_api.md @@ -17,7 +17,8 @@ Request are made to the following endpoint. ### Samples -This returns a message that includes a list of raw samples. +This returns a message that includes a list of raw samples matching the +requested query. ### Streamed Chunks diff --git a/storage/interface.go b/storage/interface.go index 7ac93129e8..b7ef14ce96 100644 --- a/storage/interface.go +++ b/storage/interface.go @@ -114,6 +114,8 @@ type Querier interface { LabelQuerier // Select returns a set of series that matches the given label matchers. + // Results are not checked whether they match. Results that do not match + // may cause undefined behavior. // Caller can specify if it requires returned series to be sorted. Prefer not requiring sorting for better performance. // It allows passing hints that can help in optimising select, but it's up to implementation how this is used if used at all. Select(ctx context.Context, sortSeries bool, hints *SelectHints, matchers ...*labels.Matcher) SeriesSet @@ -152,6 +154,8 @@ type ChunkQuerier interface { LabelQuerier // Select returns a set of series that matches the given label matchers. + // Results are not checked whether they match. Results that do not match + // may cause undefined behavior. // Caller can specify if it requires returned series to be sorted. Prefer not requiring sorting for better performance. // It allows passing hints that can help in optimising select, but it's up to implementation how this is used if used at all. 
Select(ctx context.Context, sortSeries bool, hints *SelectHints, matchers ...*labels.Matcher) ChunkSeriesSet From 47aeca96630344464a139b0963dcc85ff463f84d Mon Sep 17 00:00:00 2001 From: Manik Rana Date: Fri, 4 Oct 2024 17:41:02 +0530 Subject: [PATCH 122/137] feat: naive fixes and optimzations for `CreatedTimestamp` function (#14965) * enhance: wip ct parse optimizations Signed-off-by: Manik Rana * feat: further work on optimization Signed-off-by: Manik Rana * feat: further improvements and remove unused code Signed-off-by: Manik Rana * feat: improve optimizations and fix some CT parse errors Signed-off-by: Manik Rana * fix: check for LsetHash along with name Signed-off-by: Manik Rana * chore: cleanup and documentation Signed-off-by: Manik Rana * enhance: improve comments and add cleaner functions Signed-off-by: Manik Rana * feat: improve comments and add cleaner functions Signed-off-by: Manik Rana * chore: rename to resetCTParseValues Signed-off-by: Manik Rana * fix: post-merge fixes Signed-off-by: Manik Rana * fix: add all possible reserved suffixes Signed-off-by: Manik Rana * test: separate CT values for each metric Signed-off-by: Manik Rana --------- Signed-off-by: Manik Rana Signed-off-by: Manik Rana --- model/textparse/openmetricsparse.go | 106 +++++++--- model/textparse/openmetricsparse_test.go | 259 ++++++++++------------- 2 files changed, 184 insertions(+), 181 deletions(-) diff --git a/model/textparse/openmetricsparse.go b/model/textparse/openmetricsparse.go index 8ec1b62ffb..0e82dc9f5c 100644 --- a/model/textparse/openmetricsparse.go +++ b/model/textparse/openmetricsparse.go @@ -95,6 +95,12 @@ type OpenMetricsParser struct { exemplarTs int64 hasExemplarTs bool + // Created timestamp parsing state. + ct int64 + ctHashSet uint64 + // visitedName is the metric name of the last visited metric when peeking ahead + // for _created series during the execution of the CreatedTimestamp method. 
+ visitedName string skipCTSeries bool } @@ -254,6 +260,9 @@ func (p *OpenMetricsParser) Exemplar(e *exemplar.Exemplar) bool { func (p *OpenMetricsParser) CreatedTimestamp() *int64 { if !typeRequiresCT(p.mtype) { // Not a CT supported metric type, fast path. + p.ct = 0 + p.visitedName = "" + p.ctHashSet = 0 return nil } @@ -264,27 +273,44 @@ func (p *OpenMetricsParser) CreatedTimestamp() *int64 { ) p.Metric(&currLset) currFamilyLsetHash, buf := currLset.HashWithoutLabels(buf, labels.MetricName, "le", "quantile") - // Search for the _created line for the currFamilyLsetHash using ephemeral parser until - // we see EOF or new metric family. We have to do it as we don't know where (and if) - // that CT line is. - // TODO(bwplotka): Make sure OM 1.1/2.0 pass CT via metadata or exemplar-like to avoid this. - peek := deepCopy(p) + currName := currLset.Get(model.MetricNameLabel) + currName = findBaseMetricName(currName) + + // make sure we're on a new metric before returning + if currName == p.visitedName && currFamilyLsetHash == p.ctHashSet && p.visitedName != "" && p.ctHashSet > 0 && p.ct > 0 { + // CT is already known, fast path. + return &p.ct + } + + // Create a new lexer to reset the parser once this function is done executing. + resetLexer := &openMetricsLexer{ + b: p.l.b, + i: p.l.i, + start: p.l.start, + err: p.l.err, + state: p.l.state, + } + + p.skipCTSeries = false + for { - eType, err := peek.Next() + eType, err := p.Next() if err != nil { - // This means peek will give error too later on, so def no CT line found. + // This means p.Next() will give error too later on, so def no CT line found. // This might result in partial scrape with wrong/missing CT, but only // spec improvement would help. - // TODO(bwplotka): Make sure OM 1.1/2.0 pass CT via metadata or exemplar-like to avoid this. + // TODO: Make sure OM 1.1/2.0 pass CT via metadata or exemplar-like to avoid this. 
+ p.resetCTParseValues(resetLexer) return nil } if eType != EntrySeries { // Assume we hit different family, no CT line found. + p.resetCTParseValues(resetLexer) return nil } var peekedLset labels.Labels - peek.Metric(&peekedLset) + p.Metric(&peekedLset) peekedName := peekedLset.Get(model.MetricNameLabel) if !strings.HasSuffix(peekedName, "_created") { // Not a CT line, search more. @@ -294,17 +320,52 @@ func (p *OpenMetricsParser) CreatedTimestamp() *int64 { // We got a CT line here, but let's search if CT line is actually for our series, edge case. peekWithoutNameLsetHash, _ = peekedLset.HashWithoutLabels(buf, labels.MetricName, "le", "quantile") if peekWithoutNameLsetHash != currFamilyLsetHash { - // CT line for a different series, for our series no CT. + // Found CT line for a different series, for our series no CT. + p.resetCTParseValues(resetLexer) return nil } // All timestamps in OpenMetrics are Unix Epoch in seconds. Convert to milliseconds. // https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#timestamps - ct := int64(peek.val * 1000.0) + ct := int64(p.val * 1000.0) + p.setCTParseValues(ct, currFamilyLsetHash, currName, true, resetLexer) return &ct } } +// setCTParseValues sets the parser to the state after CreatedTimestamp method was called and CT was found. +// This is useful to prevent re-parsing the same series again and early return the CT value. +func (p *OpenMetricsParser) setCTParseValues(ct int64, ctHashSet uint64, visitedName string, skipCTSeries bool, resetLexer *openMetricsLexer) { + p.ct = ct + p.l = resetLexer + p.ctHashSet = ctHashSet + p.visitedName = visitedName + p.skipCTSeries = skipCTSeries +} + +// resetCtParseValues resets the parser to the state before CreatedTimestamp method was called. 
+func (p *OpenMetricsParser) resetCTParseValues(resetLexer *openMetricsLexer) { + p.l = resetLexer + p.ct = 0 + p.ctHashSet = 0 + p.visitedName = "" + p.skipCTSeries = true +} + +// findBaseMetricName returns the metric name without reserved suffixes such as "_created", +// "_sum", etc. based on the OpenMetrics specification found at +// https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md. +// If no suffix is found, the original name is returned. +func findBaseMetricName(name string) string { + suffixes := []string{"_created", "_count", "_sum", "_bucket", "_total", "_gcount", "_gsum", "_info"} + for _, suffix := range suffixes { + if strings.HasSuffix(name, suffix) { + return strings.TrimSuffix(name, suffix) + } + } + return name +} + // typeRequiresCT returns true if the metric type requires a _created timestamp. func typeRequiresCT(t model.MetricType) bool { switch t { @@ -315,29 +376,6 @@ func typeRequiresCT(t model.MetricType) bool { } } -// deepCopy creates a copy of a parser without re-using the slices' original memory addresses. -func deepCopy(p *OpenMetricsParser) OpenMetricsParser { - newB := make([]byte, len(p.l.b)) - copy(newB, p.l.b) - - newLexer := &openMetricsLexer{ - b: newB, - i: p.l.i, - start: p.l.start, - err: p.l.err, - state: p.l.state, - } - - newParser := OpenMetricsParser{ - l: newLexer, - builder: p.builder, - mtype: p.mtype, - val: p.val, - skipCTSeries: false, - } - return newParser -} - // nextToken returns the next token from the openMetricsLexer. 
func (p *OpenMetricsParser) nextToken() token { tok := p.l.Lex() diff --git a/model/textparse/openmetricsparse_test.go b/model/textparse/openmetricsparse_test.go index 93033380b0..bbb7c07306 100644 --- a/model/textparse/openmetricsparse_test.go +++ b/model/textparse/openmetricsparse_test.go @@ -79,17 +79,38 @@ bar_count 17.0 bar_sum 324789.3 bar{quantile="0.95"} 123.7 bar{quantile="0.99"} 150.0 -bar_created 1520872607.123 +bar_created 1520872608.124 # HELP baz Histogram with the same objective as above's summary # TYPE baz histogram baz_bucket{le="0.0"} 0 baz_bucket{le="+Inf"} 17 baz_count 17 baz_sum 324789.3 -baz_created 1520872607.123 +baz_created 1520872609.125 # HELP fizz_created Gauge which shouldn't be parsed as CT # TYPE fizz_created gauge -fizz_created 17.0` +fizz_created 17.0 +# HELP something Histogram with _created between buckets and summary +# TYPE something histogram +something_count 18 +something_sum 324789.4 +something_created 1520430001 +something_bucket{le="0.0"} 1 +something_bucket{le="+Inf"} 18 +# HELP yum Summary with _created between sum and quantiles +# TYPE yum summary +yum_count 20 +yum_sum 324789.5 +yum_created 1520430003 +yum{quantile="0.95"} 123.7 +yum{quantile="0.99"} 150.0 +# HELP foobar Summary with _created as the first line +# TYPE foobar summary +foobar_count 21 +foobar_created 1520430004 +foobar_sum 324789.6 +foobar{quantile="0.95"} 123.8 +foobar{quantile="0.99"} 150.1` input += "\n# HELP metric foo\x00bar" input += "\nnull_byte_metric{a=\"abc\x00\"} 1" @@ -269,22 +290,22 @@ fizz_created 17.0` m: "bar_count", v: 17.0, lset: labels.FromStrings("__name__", "bar_count"), - ct: int64p(1520872607123), + ct: int64p(1520872608124), }, { m: "bar_sum", v: 324789.3, lset: labels.FromStrings("__name__", "bar_sum"), - ct: int64p(1520872607123), + ct: int64p(1520872608124), }, { m: `bar{quantile="0.95"}`, v: 123.7, lset: labels.FromStrings("__name__", "bar", "quantile", "0.95"), - ct: int64p(1520872607123), + ct: int64p(1520872608124), }, { m: 
`bar{quantile="0.99"}`, v: 150.0, lset: labels.FromStrings("__name__", "bar", "quantile", "0.99"), - ct: int64p(1520872607123), + ct: int64p(1520872608124), }, { m: "baz", help: "Histogram with the same objective as above's summary", @@ -295,22 +316,22 @@ fizz_created 17.0` m: `baz_bucket{le="0.0"}`, v: 0, lset: labels.FromStrings("__name__", "baz_bucket", "le", "0.0"), - ct: int64p(1520872607123), + ct: int64p(1520872609125), }, { m: `baz_bucket{le="+Inf"}`, v: 17, lset: labels.FromStrings("__name__", "baz_bucket", "le", "+Inf"), - ct: int64p(1520872607123), + ct: int64p(1520872609125), }, { m: `baz_count`, v: 17, lset: labels.FromStrings("__name__", "baz_count"), - ct: int64p(1520872607123), + ct: int64p(1520872609125), }, { m: `baz_sum`, v: 324789.3, lset: labels.FromStrings("__name__", "baz_sum"), - ct: int64p(1520872607123), + ct: int64p(1520872609125), }, { m: "fizz_created", help: "Gauge which shouldn't be parsed as CT", @@ -321,6 +342,84 @@ fizz_created 17.0` m: `fizz_created`, v: 17, lset: labels.FromStrings("__name__", "fizz_created"), + }, { + m: "something", + help: "Histogram with _created between buckets and summary", + }, { + m: "something", + typ: model.MetricTypeHistogram, + }, { + m: `something_count`, + v: 18, + lset: labels.FromStrings("__name__", "something_count"), + ct: int64p(1520430001000), + }, { + m: `something_sum`, + v: 324789.4, + lset: labels.FromStrings("__name__", "something_sum"), + ct: int64p(1520430001000), + }, { + m: `something_bucket{le="0.0"}`, + v: 1, + lset: labels.FromStrings("__name__", "something_bucket", "le", "0.0"), + ct: int64p(1520430001000), + }, { + m: `something_bucket{le="+Inf"}`, + v: 18, + lset: labels.FromStrings("__name__", "something_bucket", "le", "+Inf"), + ct: int64p(1520430001000), + }, { + m: "yum", + help: "Summary with _created between sum and quantiles", + }, { + m: "yum", + typ: model.MetricTypeSummary, + }, { + m: `yum_count`, + v: 20, + lset: labels.FromStrings("__name__", "yum_count"), + ct: 
int64p(1520430003000), + }, { + m: `yum_sum`, + v: 324789.5, + lset: labels.FromStrings("__name__", "yum_sum"), + ct: int64p(1520430003000), + }, { + m: `yum{quantile="0.95"}`, + v: 123.7, + lset: labels.FromStrings("__name__", "yum", "quantile", "0.95"), + ct: int64p(1520430003000), + }, { + m: `yum{quantile="0.99"}`, + v: 150.0, + lset: labels.FromStrings("__name__", "yum", "quantile", "0.99"), + ct: int64p(1520430003000), + }, { + m: "foobar", + help: "Summary with _created as the first line", + }, { + m: "foobar", + typ: model.MetricTypeSummary, + }, { + m: `foobar_count`, + v: 21, + lset: labels.FromStrings("__name__", "foobar_count"), + ct: int64p(1520430004000), + }, { + m: `foobar_sum`, + v: 324789.6, + lset: labels.FromStrings("__name__", "foobar_sum"), + ct: int64p(1520430004000), + }, { + m: `foobar{quantile="0.95"}`, + v: 123.8, + lset: labels.FromStrings("__name__", "foobar", "quantile", "0.95"), + ct: int64p(1520430004000), + }, { + m: `foobar{quantile="0.99"}`, + v: 150.1, + lset: labels.FromStrings("__name__", "foobar", "quantile", "0.99"), + ct: int64p(1520430004000), }, { m: "metric", help: "foo\x00bar", @@ -784,34 +883,13 @@ func TestOMNullByteHandling(t *testing.T) { // these tests show them. // TODO(maniktherana): Make sure OM 1.1/2.0 pass CT via metadata or exemplar-like to avoid this. 
func TestCTParseFailures(t *testing.T) { - input := `# HELP something Histogram with _created between buckets and summary -# TYPE something histogram -something_count 17 -something_sum 324789.3 -something_created 1520872607.123 -something_bucket{le="0.0"} 0 -something_bucket{le="+Inf"} 17 -# HELP thing Histogram with _created as first line + input := `# HELP thing Histogram with _created as first line # TYPE thing histogram thing_created 1520872607.123 thing_count 17 thing_sum 324789.3 thing_bucket{le="0.0"} 0 -thing_bucket{le="+Inf"} 17 -# HELP yum Summary with _created between sum and quantiles -# TYPE yum summary -yum_count 17.0 -yum_sum 324789.3 -yum_created 1520872607.123 -yum{quantile="0.95"} 123.7 -yum{quantile="0.99"} 150.0 -# HELP foobar Summary with _created as the first line -# TYPE foobar summary -foobar_created 1520872607.123 -foobar_count 17.0 -foobar_sum 324789.3 -foobar{quantile="0.95"} 123.7 -foobar{quantile="0.99"} 150.0` +thing_bucket{le="+Inf"} 17` input += "\n# EOF\n" @@ -827,30 +905,6 @@ foobar{quantile="0.99"} 150.0` exp := []expectCT{ { - m: "something", - help: "Histogram with _created between buckets and summary", - isErr: false, - }, { - m: "something", - typ: model.MetricTypeHistogram, - isErr: false, - }, { - m: `something_count`, - ct: int64p(1520872607123), - isErr: false, - }, { - m: `something_sum`, - ct: int64p(1520872607123), - isErr: false, - }, { - m: `something_bucket{le="0.0"}`, - ct: int64p(1520872607123), - isErr: true, - }, { - m: `something_bucket{le="+Inf"}`, - ct: int64p(1520872607123), - isErr: true, - }, { m: "thing", help: "Histogram with _created as first line", isErr: false, @@ -874,54 +928,6 @@ foobar{quantile="0.99"} 150.0` m: `thing_bucket{le="+Inf"}`, ct: int64p(1520872607123), isErr: true, - }, { - m: "yum", - help: "Summary with _created between summary and quantiles", - isErr: false, - }, { - m: "yum", - typ: model.MetricTypeSummary, - isErr: false, - }, { - m: "yum_count", - ct: int64p(1520872607123), - 
isErr: false, - }, { - m: "yum_sum", - ct: int64p(1520872607123), - isErr: false, - }, { - m: `yum{quantile="0.95"}`, - ct: int64p(1520872607123), - isErr: true, - }, { - m: `yum{quantile="0.99"}`, - ct: int64p(1520872607123), - isErr: true, - }, { - m: "foobar", - help: "Summary with _created as the first line", - isErr: false, - }, { - m: "foobar", - typ: model.MetricTypeSummary, - isErr: false, - }, { - m: "foobar_count", - ct: int64p(1520430004), - isErr: true, - }, { - m: "foobar_sum", - ct: int64p(1520430004), - isErr: true, - }, { - m: `foobar{quantile="0.95"}`, - ct: int64p(1520430004), - isErr: true, - }, { - m: `foobar{quantile="0.99"}`, - ct: int64p(1520430004), - isErr: true, }, } @@ -953,47 +959,6 @@ foobar{quantile="0.99"} 150.0` } } -func TestDeepCopy(t *testing.T) { - input := []byte(`# HELP go_goroutines A gauge goroutines. -# TYPE go_goroutines gauge -go_goroutines 33 123.123 -# TYPE go_gc_duration_seconds summary -go_gc_duration_seconds -go_gc_duration_seconds_created`) - - st := labels.NewSymbolTable() - parser := NewOpenMetricsParser(input, st, WithOMParserCTSeriesSkipped()).(*OpenMetricsParser) - - // Modify the original parser state - _, err := parser.Next() - require.NoError(t, err) - require.Equal(t, "go_goroutines", string(parser.l.b[parser.offsets[0]:parser.offsets[1]])) - require.True(t, parser.skipCTSeries) - - // Create a deep copy of the parser - copyParser := deepCopy(parser) - etype, err := copyParser.Next() - require.NoError(t, err) - require.Equal(t, EntryType, etype) - require.True(t, parser.skipCTSeries) - require.False(t, copyParser.skipCTSeries) - - // Modify the original parser further - parser.Next() - parser.Next() - parser.Next() - require.Equal(t, "go_gc_duration_seconds", string(parser.l.b[parser.offsets[0]:parser.offsets[1]])) - require.Equal(t, "summary", string(parser.mtype)) - require.False(t, copyParser.skipCTSeries) - require.True(t, parser.skipCTSeries) - - // Ensure the copy remains unchanged - copyParser.Next() 
- copyParser.Next() - require.Equal(t, "go_gc_duration_seconds", string(copyParser.l.b[copyParser.offsets[0]:copyParser.offsets[1]])) - require.False(t, copyParser.skipCTSeries) -} - func BenchmarkOMParseCreatedTimestamp(b *testing.B) { for parserName, parser := range map[string]func([]byte, *labels.SymbolTable) Parser{ "openmetrics": func(b []byte, st *labels.SymbolTable) Parser { From 3d2194f56179300c3aa6653d1d72b225f85f7e9f Mon Sep 17 00:00:00 2001 From: Julius Volz Date: Fri, 4 Oct 2024 13:54:03 +0200 Subject: [PATCH 123/137] Style cleanups, mostly for web notifications and startup alert Some of the changes are a bit unreadable because the previous files were not saved with the project's linter / auto-formatter settings applied. But it's basically: * For icons that are not Mantine-native components, use the rem() function for computing their size, so they scale correctly with the root font size. See https://mantine.dev/styles/rem/. * Try a different icon for the notifications tray, since the bell icon was already used for Prometheus alerts. Other candidates from https://tabler.io/icons would be IconExclamationCircle or IconDeviceDesktopExclamation or IconMessageCircleExclamation. * The server startup alert looked a bit cramped, introduced a Stack to add spacing between the text and the progress bar. * Added a bit of spacing between notification text and date. Things looked cramped. To make things look ok with that, I also top-aligned the notification text and icon. 
Signed-off-by: Julius Volz --- .../src/components/NotificationsIcon.tsx | 136 ++++++++++++------ .../src/components/ReadinessWrapper.tsx | 34 +++-- web/ui/mantine-ui/src/pages/RulesPage.tsx | 9 +- .../query/MetricsExplorer/LabelsExplorer.tsx | 5 +- 4 files changed, 121 insertions(+), 63 deletions(-) diff --git a/web/ui/mantine-ui/src/components/NotificationsIcon.tsx b/web/ui/mantine-ui/src/components/NotificationsIcon.tsx index 5ab28b037a..6d5afa1901 100644 --- a/web/ui/mantine-ui/src/components/NotificationsIcon.tsx +++ b/web/ui/mantine-ui/src/components/NotificationsIcon.tsx @@ -1,61 +1,105 @@ -import { ActionIcon, Indicator, Popover, Card, Text, Stack, ScrollArea, Group } from "@mantine/core"; -import { IconBell, IconAlertTriangle, IconNetworkOff } from "@tabler/icons-react"; -import { useNotifications } from '../state/useNotifications'; +import { + ActionIcon, + Indicator, + Popover, + Card, + Text, + Stack, + ScrollArea, + Group, + rem, +} from "@mantine/core"; +import { + IconAlertTriangle, + IconNetworkOff, + IconMessageExclamation, +} from "@tabler/icons-react"; +import { useNotifications } from "../state/useNotifications"; import { actionIconStyle } from "../styles"; -import { useSettings } from '../state/settingsSlice'; +import { useSettings } from "../state/settingsSlice"; import { formatTimestamp } from "../lib/formatTime"; const NotificationsIcon = () => { const { notifications, isConnectionError } = useNotifications(); const { useLocalTime } = useSettings(); - return ( - (notifications.length === 0 && !isConnectionError) ? null : ( - - - - - - - + return notifications.length === 0 && !isConnectionError ? null : ( + + + + + + + - - - Notifications - - { isConnectionError ? ( - - - - - Real-time notifications interrupted. - Please refresh the page or check your connection. - - - - ) : notifications.length === 0 ? ( - No notifications - ) : (notifications.map((notification, index) => ( + + + + Notifications + + + {isConnectionError ? 
( + + + + + + Real-time notifications interrupted. + + + Please refresh the page or check your connection. + + + + + ) : notifications.length === 0 ? ( + + No notifications + + ) : ( + notifications.map((notification, index) => ( - - - - {notification.text} - {formatTimestamp(new Date(notification.date).valueOf() / 1000, useLocalTime)} + + + + + {notification.text} + + + {formatTimestamp( + new Date(notification.date).valueOf() / 1000, + useLocalTime + )} + - )))} - - - - - - ) + )) + )} + + + + + ); }; diff --git a/web/ui/mantine-ui/src/components/ReadinessWrapper.tsx b/web/ui/mantine-ui/src/components/ReadinessWrapper.tsx index dbfcba5550..2e471de5e3 100644 --- a/web/ui/mantine-ui/src/components/ReadinessWrapper.tsx +++ b/web/ui/mantine-ui/src/components/ReadinessWrapper.tsx @@ -4,7 +4,7 @@ import { useAppDispatch } from "../state/hooks"; import { updateSettings, useSettings } from "../state/settingsSlice"; import { useSuspenseAPIQuery } from "../api/api"; import { WALReplayStatus } from "../api/responseTypes/walreplay"; -import { Progress, Alert } from "@mantine/core"; +import { Progress, Alert, Stack } from "@mantine/core"; import { useSuspenseQuery } from "@tanstack/react-query"; const STATUS_STARTING = "is starting up..."; @@ -57,14 +57,12 @@ const ReadinessLoader: FC = () => { // Only call WAL replay status API if the service is starting up. const shouldQueryWALReplay = statusMessage === STATUS_STARTING; - const { - data: walData, - isSuccess: walSuccess, - } = useSuspenseAPIQuery({ - path: "/status/walreplay", - key: ["walreplay", queryKey], - enabled: shouldQueryWALReplay, // Only enabled when service is starting up. - }); + const { data: walData, isSuccess: walSuccess } = + useSuspenseAPIQuery({ + path: "/status/walreplay", + key: ["walreplay", queryKey], + enabled: shouldQueryWALReplay, // Only enabled when service is starting up. 
+ }); useEffect(() => { if (ready) { @@ -80,14 +78,18 @@ const ReadinessLoader: FC = () => { return ( } + title={ + "Prometheus " + + ((agentMode && "Agent ") || "") + + (statusMessage || STATUS_LOADING) + } + icon={} maw={500} mx="auto" mt="lg" > {shouldQueryWALReplay && walSuccess && walData && ( - <> + Replaying WAL ({walData.data.current}/{walData.data.max}) @@ -95,9 +97,13 @@ const ReadinessLoader: FC = () => { size="xl" animated color="yellow" - value={((walData.data.current - walData.data.min + 1) / (walData.data.max - walData.data.min + 1)) * 100} + value={ + ((walData.data.current - walData.data.min + 1) / + (walData.data.max - walData.data.min + 1)) * + 100 + } /> - + )} ); diff --git a/web/ui/mantine-ui/src/pages/RulesPage.tsx b/web/ui/mantine-ui/src/pages/RulesPage.tsx index ce0097776a..a4ed44e7c2 100644 --- a/web/ui/mantine-ui/src/pages/RulesPage.tsx +++ b/web/ui/mantine-ui/src/pages/RulesPage.tsx @@ -4,6 +4,7 @@ import { Badge, Card, Group, + rem, Stack, Text, Tooltip, @@ -135,11 +136,15 @@ export default function RulesPage() { {r.type === "alerting" ? 
( - + ) : ( - + )} {r.name} diff --git a/web/ui/mantine-ui/src/pages/query/MetricsExplorer/LabelsExplorer.tsx b/web/ui/mantine-ui/src/pages/query/MetricsExplorer/LabelsExplorer.tsx index 782fb5cf48..d18c017b18 100644 --- a/web/ui/mantine-ui/src/pages/query/MetricsExplorer/LabelsExplorer.tsx +++ b/web/ui/mantine-ui/src/pages/query/MetricsExplorer/LabelsExplorer.tsx @@ -21,6 +21,7 @@ import { Skeleton, Stack, Table, + rem, } from "@mantine/core"; import { escapeString } from "../../../lib/escapeString"; import serializeNode from "../../../promql/serialize"; @@ -326,7 +327,9 @@ const LabelsExplorer: FC = ({ title="Cancel" style={{ flexShrink: 0 }} > - + ) : ( From 6c3d11629b3c46e17267e3da652bd51e6090d316 Mon Sep 17 00:00:00 2001 From: Ben Ye Date: Sat, 5 Oct 2024 17:05:42 -0700 Subject: [PATCH 124/137] add missing flag storage.tsdb.allow-overlapping-compaction Signed-off-by: Ben Ye --- cmd/prometheus/main.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/cmd/prometheus/main.go b/cmd/prometheus/main.go index 11d8caae6f..f670bc8b8c 100644 --- a/cmd/prometheus/main.go +++ b/cmd/prometheus/main.go @@ -384,6 +384,9 @@ func main() { serverOnlyFlag(a, "storage.tsdb.no-lockfile", "Do not create lockfile in data directory."). Default("false").BoolVar(&cfg.tsdb.NoLockfile) + serverOnlyFlag(a, "storage.tsdb.allow-overlapping-compaction", "Allow compaction of overlapping blocks. If set to false, TSDB stops vertical compaction and leaves overlapping blocks there. The use case is to let another component handle the compaction of overlapping blocks."). + Default("true").Hidden().BoolVar(&cfg.tsdb.EnableOverlappingCompaction) + serverOnlyFlag(a, "storage.tsdb.wal-compression", "Compress the tsdb WAL."). 
Hidden().Default("true").BoolVar(&cfg.tsdb.WALCompression) From ab64966e9d21ce3a3e42415da3a4227f8220b15c Mon Sep 17 00:00:00 2001 From: Matthieu MOREL Date: Sun, 6 Oct 2024 18:35:29 +0200 Subject: [PATCH 125/137] fix: use "ErrorContains" or "EqualError" instead of "Contains(t, err.Error()" and "Equal(t, err.Error()" (#15094) * fix: use "ErrorContains" or "EqualError" instead of "Contains(t, err.Error()" and "Equal(t, err.Error()" --------- Signed-off-by: Matthieu MOREL Signed-off-by: Arve Knudsen Co-authored-by: Arve Knudsen --- .golangci.yml | 12 +----------- cmd/promtool/main_test.go | 8 ++++---- config/config_test.go | 3 +-- discovery/consul/consul_test.go | 2 +- discovery/hetzner/robot_test.go | 3 +-- discovery/openstack/hypervisor_test.go | 3 +-- discovery/openstack/instance_test.go | 3 +-- discovery/triton/triton_test.go | 7 ++----- discovery/xds/client_test.go | 6 ++---- discovery/xds/kuma_test.go | 3 +-- model/rulefmt/rulefmt_test.go | 11 ++++------- model/textparse/interface_test.go | 3 +-- model/textparse/openmetricsparse_test.go | 6 +++--- model/textparse/promparse_test.go | 6 ++---- promql/fuzz_test.go | 2 +- promql/parser/parse_test.go | 5 ++--- scrape/scrape_test.go | 4 ++-- storage/fanout_test.go | 18 ++++++------------ storage/remote/chunked_test.go | 4 ++-- storage/remote/codec_test.go | 5 ++--- tsdb/block_test.go | 4 ++-- tsdb/compact_test.go | 3 +-- tsdb/head_test.go | 3 +-- web/api/v1/api_test.go | 2 +- 24 files changed, 45 insertions(+), 81 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index 303cd33d8b..d476be743b 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -153,14 +153,4 @@ linters-settings: disable: - float-compare - go-require - enable: - - bool-compare - - compares - - empty - - error-is-as - - error-nil - - expected-actual - - len - - require-error - - suite-dont-use-pkg - - suite-extra-assert-call + enable-all: true diff --git a/cmd/promtool/main_test.go b/cmd/promtool/main_test.go index 9d891c32fd..698e6641d1 100644 --- 
a/cmd/promtool/main_test.go +++ b/cmd/promtool/main_test.go @@ -146,7 +146,7 @@ func TestCheckSDFile(t *testing.T) { t.Run(test.name, func(t *testing.T) { _, err := checkSDFile(test.file) if test.err != "" { - require.Equalf(t, test.err, err.Error(), "Expected error %q, got %q", test.err, err.Error()) + require.EqualErrorf(t, err, test.err, "Expected error %q, got %q", test.err, err.Error()) return } require.NoError(t, err) @@ -228,7 +228,7 @@ func TestCheckTargetConfig(t *testing.T) { t.Run(test.name, func(t *testing.T) { _, err := checkConfig(false, "testdata/"+test.file, false) if test.err != "" { - require.Equalf(t, test.err, err.Error(), "Expected error %q, got %q", test.err, err.Error()) + require.EqualErrorf(t, err, test.err, "Expected error %q, got %q", test.err, err.Error()) return } require.NoError(t, err) @@ -315,7 +315,7 @@ func TestCheckConfigSyntax(t *testing.T) { expectedErrMsg = test.errWindows } if expectedErrMsg != "" { - require.Equalf(t, expectedErrMsg, err.Error(), "Expected error %q, got %q", test.err, err.Error()) + require.EqualErrorf(t, err, expectedErrMsg, "Expected error %q, got %q", test.err, err.Error()) return } require.NoError(t, err) @@ -345,7 +345,7 @@ func TestAuthorizationConfig(t *testing.T) { t.Run(test.name, func(t *testing.T) { _, err := checkConfig(false, "testdata/"+test.file, false) if test.err != "" { - require.Contains(t, err.Error(), test.err, "Expected error to contain %q, got %q", test.err, err.Error()) + require.ErrorContains(t, err, test.err, "Expected error to contain %q, got %q", test.err, err.Error()) return } require.NoError(t, err) diff --git a/config/config_test.go b/config/config_test.go index 66377f6879..47241e6212 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -2095,8 +2095,7 @@ func TestBadConfigs(t *testing.T) { }() for _, ee := range expectedErrors { _, err := LoadFile("testdata/"+ee.filename, false, false, log.NewNopLogger()) - require.Error(t, err, "%s", ee.filename) - 
require.Contains(t, err.Error(), ee.errMsg, + require.ErrorContains(t, err, ee.errMsg, "Expected error for %s to contain %q but got: %s", ee.filename, ee.errMsg, err) } } diff --git a/discovery/consul/consul_test.go b/discovery/consul/consul_test.go index e3bc7938f5..e288a5b2ae 100644 --- a/discovery/consul/consul_test.go +++ b/discovery/consul/consul_test.go @@ -407,7 +407,7 @@ func TestGetDatacenterShouldReturnError(t *testing.T) { err = d.getDatacenter() // An error should be returned. - require.Equal(t, tc.errMessage, err.Error()) + require.EqualError(t, err, tc.errMessage) // Should still be empty. require.Equal(t, "", d.clientDatacenter) } diff --git a/discovery/hetzner/robot_test.go b/discovery/hetzner/robot_test.go index abee5fea90..814bccd51f 100644 --- a/discovery/hetzner/robot_test.go +++ b/discovery/hetzner/robot_test.go @@ -95,8 +95,7 @@ func TestRobotSDRefreshHandleError(t *testing.T) { require.NoError(t, err) targetGroups, err := d.refresh(context.Background()) - require.Error(t, err) - require.Equal(t, "non 2xx status '401' response during hetzner service discovery with role robot", err.Error()) + require.EqualError(t, err, "non 2xx status '401' response during hetzner service discovery with role robot") require.Empty(t, targetGroups) } diff --git a/discovery/openstack/hypervisor_test.go b/discovery/openstack/hypervisor_test.go index 45684b4a2e..e4a97f32cf 100644 --- a/discovery/openstack/hypervisor_test.go +++ b/discovery/openstack/hypervisor_test.go @@ -93,6 +93,5 @@ func TestOpenstackSDHypervisorRefreshWithDoneContext(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) cancel() _, err := hypervisor.refresh(ctx) - require.Error(t, err) - require.Contains(t, err.Error(), context.Canceled.Error(), "%q doesn't contain %q", err, context.Canceled) + require.ErrorContains(t, err, context.Canceled.Error(), "%q doesn't contain %q", err, context.Canceled) } diff --git a/discovery/openstack/instance_test.go 
b/discovery/openstack/instance_test.go index 2b5ac1b89e..2617baa4e3 100644 --- a/discovery/openstack/instance_test.go +++ b/discovery/openstack/instance_test.go @@ -134,6 +134,5 @@ func TestOpenstackSDInstanceRefreshWithDoneContext(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) cancel() _, err := hypervisor.refresh(ctx) - require.Error(t, err) - require.Contains(t, err.Error(), context.Canceled.Error(), "%q doesn't contain %q", err, context.Canceled) + require.ErrorContains(t, err, context.Canceled.Error(), "%q doesn't contain %q", err, context.Canceled) } diff --git a/discovery/triton/triton_test.go b/discovery/triton/triton_test.go index e37693e6bf..b2d06afaf6 100644 --- a/discovery/triton/triton_test.go +++ b/discovery/triton/triton_test.go @@ -21,7 +21,6 @@ import ( "net/http/httptest" "net/url" "strconv" - "strings" "testing" "github.com/prometheus/client_golang/prometheus" @@ -182,8 +181,7 @@ func TestTritonSDRefreshNoServer(t *testing.T) { td, m, _ := newTritonDiscovery(conf) _, err := td.refresh(context.Background()) - require.Error(t, err) - require.True(t, strings.Contains(err.Error(), "an error occurred when requesting targets from the discovery endpoint")) + require.ErrorContains(t, err, "an error occurred when requesting targets from the discovery endpoint") m.Unregister() } @@ -193,8 +191,7 @@ func TestTritonSDRefreshCancelled(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) cancel() _, err := td.refresh(ctx) - require.Error(t, err) - require.True(t, strings.Contains(err.Error(), context.Canceled.Error())) + require.ErrorContains(t, err, context.Canceled.Error()) m.Unregister() } diff --git a/discovery/xds/client_test.go b/discovery/xds/client_test.go index b699995fb7..2cf5b2f9cb 100644 --- a/discovery/xds/client_test.go +++ b/discovery/xds/client_test.go @@ -52,16 +52,14 @@ func TestMakeXDSResourceHttpEndpointEmptyServerURLScheme(t *testing.T) { endpointURL, err := 
makeXDSResourceHTTPEndpointURL(ProtocolV3, urlMustParse("127.0.0.1"), "monitoring") require.Empty(t, endpointURL) - require.Error(t, err) - require.Equal(t, "invalid xDS server URL", err.Error()) + require.EqualError(t, err, "invalid xDS server URL") } func TestMakeXDSResourceHttpEndpointEmptyServerURLHost(t *testing.T) { endpointURL, err := makeXDSResourceHTTPEndpointURL(ProtocolV3, urlMustParse("grpc://127.0.0.1"), "monitoring") require.Empty(t, endpointURL) - require.Error(t, err) - require.Contains(t, err.Error(), "must be either 'http' or 'https'") + require.ErrorContains(t, err, "must be either 'http' or 'https'") } func TestMakeXDSResourceHttpEndpoint(t *testing.T) { diff --git a/discovery/xds/kuma_test.go b/discovery/xds/kuma_test.go index cfb9cbac50..23d754c4b7 100644 --- a/discovery/xds/kuma_test.go +++ b/discovery/xds/kuma_test.go @@ -201,9 +201,8 @@ func TestKumaMadsV1ResourceParserInvalidResources(t *testing.T) { }} groups, err := kumaMadsV1ResourceParser(resources, KumaMadsV1ResourceTypeURL) require.Nil(t, groups) - require.Error(t, err) - require.Contains(t, err.Error(), "cannot parse") + require.ErrorContains(t, err, "cannot parse") } func TestNewKumaHTTPDiscovery(t *testing.T) { diff --git a/model/rulefmt/rulefmt_test.go b/model/rulefmt/rulefmt_test.go index ef5008f4bf..669f1da4e0 100644 --- a/model/rulefmt/rulefmt_test.go +++ b/model/rulefmt/rulefmt_test.go @@ -85,9 +85,8 @@ func TestParseFileFailure(t *testing.T) { for _, c := range table { _, errs := ParseFile(filepath.Join("testdata", c.filename)) - require.NotNil(t, errs, "Expected error parsing %s but got none", c.filename) - require.Error(t, errs[0]) - require.Containsf(t, errs[0].Error(), c.errMsg, "Expected error for %s.", c.filename) + require.NotEmpty(t, errs, "Expected error parsing %s but got none", c.filename) + require.ErrorContainsf(t, errs[0], c.errMsg, "Expected error for %s.", c.filename) } } @@ -259,8 +258,7 @@ func TestError(t *testing.T) { for _, tt := range tests { 
t.Run(tt.name, func(t *testing.T) { - got := tt.error.Error() - require.Equal(t, tt.want, got) + require.EqualError(t, tt.error, tt.want) }) } } @@ -308,8 +306,7 @@ func TestWrappedError(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got := tt.wrappedError.Error() - require.Equal(t, tt.want, got) + require.EqualError(t, tt.wrappedError, tt.want) }) } } diff --git a/model/textparse/interface_test.go b/model/textparse/interface_test.go index 970b96706e..e010cb36ec 100644 --- a/model/textparse/interface_test.go +++ b/model/textparse/interface_test.go @@ -98,8 +98,7 @@ func TestNewParser(t *testing.T) { if tt.err == "" { require.NoError(t, err) } else { - require.Error(t, err) - require.Contains(t, err.Error(), tt.err) + require.ErrorContains(t, err, tt.err) } }) } diff --git a/model/textparse/openmetricsparse_test.go b/model/textparse/openmetricsparse_test.go index bbb7c07306..ea1f2a25f9 100644 --- a/model/textparse/openmetricsparse_test.go +++ b/model/textparse/openmetricsparse_test.go @@ -804,7 +804,7 @@ func TestOpenMetricsParseErrors(t *testing.T) { for err == nil { _, err = p.Next() } - require.Equal(t, c.err, err.Error(), "test %d: %s", i, c.input) + require.EqualError(t, err, c.err, "test %d: %s", i, c.input) } } @@ -871,11 +871,11 @@ func TestOMNullByteHandling(t *testing.T) { } if c.err == "" { - require.Equal(t, io.EOF, err, "test %d", i) + require.ErrorIs(t, err, io.EOF, "test %d", i) continue } - require.Equal(t, c.err, err.Error(), "test %d", i) + require.EqualError(t, err, c.err, "test %d", i) } } diff --git a/model/textparse/promparse_test.go b/model/textparse/promparse_test.go index 4520dfe9a9..e0337f8fd9 100644 --- a/model/textparse/promparse_test.go +++ b/model/textparse/promparse_test.go @@ -423,8 +423,7 @@ func TestPromParseErrors(t *testing.T) { for err == nil { _, err = p.Next() } - require.Error(t, err) - require.Equal(t, c.err, err.Error(), "test %d", i) + require.EqualError(t, err, c.err, "test %d", i) } } @@ 
-483,8 +482,7 @@ func TestPromNullByteHandling(t *testing.T) { continue } - require.Error(t, err) - require.Equal(t, c.err, err.Error(), "test %d", i) + require.EqualError(t, err, c.err, "test %d", i) } } diff --git a/promql/fuzz_test.go b/promql/fuzz_test.go index 1f0bbaa662..4a26798ded 100644 --- a/promql/fuzz_test.go +++ b/promql/fuzz_test.go @@ -29,7 +29,7 @@ func TestfuzzParseMetricWithContentTypePanicOnInvalid(t *testing.T) { } else { err, ok := p.(error) require.True(t, ok) - require.Contains(t, err.Error(), "duplicate parameter name") + require.ErrorContains(t, err, "duplicate parameter name") } }() diff --git a/promql/parser/parse_test.go b/promql/parser/parse_test.go index f50137b6de..b5096b7775 100644 --- a/promql/parser/parse_test.go +++ b/promql/parser/parse_test.go @@ -3925,8 +3925,7 @@ func TestParseExpressions(t *testing.T) { require.Equal(t, expected, expr, "error on input '%s'", test.input) } else { - require.Error(t, err) - require.Contains(t, err.Error(), test.errMsg, "unexpected error on input '%s', expected '%s', got '%s'", test.input, test.errMsg, err.Error()) + require.ErrorContains(t, err, test.errMsg, "unexpected error on input '%s', expected '%s', got '%s'", test.input, test.errMsg, err.Error()) var errorList ParseErrors ok := errors.As(err, &errorList) @@ -4468,7 +4467,7 @@ func TestRecoverParserError(t *testing.T) { e := errors.New("custom error") defer func() { - require.Equal(t, e.Error(), err.Error()) + require.EqualError(t, err, e.Error()) }() defer p.recover(&err) diff --git a/scrape/scrape_test.go b/scrape/scrape_test.go index 57c51b2e92..f065ecebbc 100644 --- a/scrape/scrape_test.go +++ b/scrape/scrape_test.go @@ -441,7 +441,7 @@ func TestScrapePoolTargetLimit(t *testing.T) { lerr := l.(*testLoop).getForcedError() if shouldErr { require.Error(t, lerr, "error was expected for %d targets with a limit of %d", targets, limit) - require.Equal(t, fmt.Sprintf("target_limit exceeded (number of targets: %d, limit: %d)", targets, limit), 
lerr.Error()) + require.EqualError(t, lerr, fmt.Sprintf("target_limit exceeded (number of targets: %d, limit: %d)", targets, limit)) } else { require.NoError(t, lerr) } @@ -2549,7 +2549,7 @@ func TestTargetScrapeScrapeNotFound(t *testing.T) { resp, err := ts.scrape(context.Background()) require.NoError(t, err) _, err = ts.readResponse(context.Background(), resp, io.Discard) - require.Contains(t, err.Error(), "404", "Expected \"404 NotFound\" error but got: %s", err) + require.ErrorContains(t, err, "404", "Expected \"404 NotFound\" error but got: %s", err) } func TestTargetScraperBodySizeLimit(t *testing.T) { diff --git a/storage/fanout_test.go b/storage/fanout_test.go index 4613fe7572..3eef9e3cd0 100644 --- a/storage/fanout_test.go +++ b/storage/fanout_test.go @@ -173,16 +173,13 @@ func TestFanoutErrors(t *testing.T) { } if tc.err != nil { - require.Error(t, ss.Err()) - require.Equal(t, tc.err.Error(), ss.Err().Error()) + require.EqualError(t, ss.Err(), tc.err.Error()) } if tc.warning != nil { - require.NotEmpty(t, ss.Warnings(), "warnings expected") w := ss.Warnings() - require.Error(t, w.AsErrors()[0]) - warn, _ := w.AsStrings("", 0, 0) - require.Equal(t, tc.warning.Error(), warn[0]) + require.NotEmpty(t, w, "warnings expected") + require.EqualError(t, w.AsErrors()[0], tc.warning.Error()) } }) t.Run("chunks", func(t *testing.T) { @@ -200,16 +197,13 @@ func TestFanoutErrors(t *testing.T) { } if tc.err != nil { - require.Error(t, ss.Err()) - require.Equal(t, tc.err.Error(), ss.Err().Error()) + require.EqualError(t, ss.Err(), tc.err.Error()) } if tc.warning != nil { - require.NotEmpty(t, ss.Warnings(), "warnings expected") w := ss.Warnings() - require.Error(t, w.AsErrors()[0]) - warn, _ := w.AsStrings("", 0, 0) - require.Equal(t, tc.warning.Error(), warn[0]) + require.NotEmpty(t, w, "warnings expected") + require.EqualError(t, w.AsErrors()[0], tc.warning.Error()) } }) } diff --git a/storage/remote/chunked_test.go b/storage/remote/chunked_test.go index 
7c3993ca62..82ed866345 100644 --- a/storage/remote/chunked_test.go +++ b/storage/remote/chunked_test.go @@ -86,7 +86,7 @@ func TestChunkedReader_Overflow(t *testing.T) { _, err = NewChunkedReader(bytes.NewReader(b2), 11, nil).Next() require.Error(t, err, "expect exceed limit error") - require.Equal(t, "chunkedReader: message size exceeded the limit 11 bytes; got: 12 bytes", err.Error()) + require.EqualError(t, err, "chunkedReader: message size exceeded the limit 11 bytes; got: 12 bytes") } func TestChunkedReader_CorruptedFrame(t *testing.T) { @@ -102,5 +102,5 @@ func TestChunkedReader_CorruptedFrame(t *testing.T) { _, err = NewChunkedReader(bytes.NewReader(bs), 20, nil).Next() require.Error(t, err, "expected malformed frame") - require.Equal(t, "chunkedReader: corrupted frame; checksum mismatch", err.Error()) + require.EqualError(t, err, "chunkedReader: corrupted frame; checksum mismatch") } diff --git a/storage/remote/codec_test.go b/storage/remote/codec_test.go index 404f1add75..5b058d84ec 100644 --- a/storage/remote/codec_test.go +++ b/storage/remote/codec_test.go @@ -253,8 +253,7 @@ func TestValidateLabelsAndMetricName(t *testing.T) { t.Run(test.description, func(t *testing.T) { err := validateLabelsAndMetricName(test.input) if test.expectedErr != "" { - require.Error(t, err) - require.Equal(t, test.expectedErr, err.Error()) + require.EqualError(t, err, test.expectedErr) } else { require.NoError(t, err) } @@ -551,7 +550,7 @@ func TestNegotiateResponseType(t *testing.T) { _, err = NegotiateResponseType([]prompb.ReadRequest_ResponseType{20}) require.Error(t, err, "expected error due to not supported requested response types") - require.Equal(t, "server does not support any of the requested response types: [20]; supported: map[SAMPLES:{} STREAMED_XOR_CHUNKS:{}]", err.Error()) + require.EqualError(t, err, "server does not support any of the requested response types: [20]; supported: map[SAMPLES:{} STREAMED_XOR_CHUNKS:{}]") } func TestMergeLabels(t *testing.T) { 
diff --git a/tsdb/block_test.go b/tsdb/block_test.go index b418a1382d..bd86b27814 100644 --- a/tsdb/block_test.go +++ b/tsdb/block_test.go @@ -192,7 +192,7 @@ func TestCorruptedChunk(t *testing.T) { // Check open err. b, err := OpenBlock(nil, blockDir, nil) if tc.openErr != nil { - require.Equal(t, tc.openErr.Error(), err.Error()) + require.EqualError(t, err, tc.openErr.Error()) return } defer func() { require.NoError(t, b.Close()) }() @@ -206,7 +206,7 @@ func TestCorruptedChunk(t *testing.T) { require.True(t, set.Next()) it := set.At().Iterator(nil) require.Equal(t, chunkenc.ValNone, it.Next()) - require.Equal(t, tc.iterErr.Error(), it.Err().Error()) + require.EqualError(t, it.Err(), tc.iterErr.Error()) }) } } diff --git a/tsdb/compact_test.go b/tsdb/compact_test.go index 939e933507..d69b70d204 100644 --- a/tsdb/compact_test.go +++ b/tsdb/compact_test.go @@ -1045,8 +1045,7 @@ func TestCompaction_populateBlock(t *testing.T) { } err = blockPopulator.PopulateBlock(c.ctx, c.metrics, c.logger, c.chunkPool, c.mergeFunc, blocks, meta, iw, nopChunkWriter{}, irPostingsFunc) if tc.expErr != nil { - require.Error(t, err) - require.Equal(t, tc.expErr.Error(), err.Error()) + require.EqualError(t, err, tc.expErr.Error()) return } require.NoError(t, err) diff --git a/tsdb/head_test.go b/tsdb/head_test.go index 8c401bc6f9..19dcc1f080 100644 --- a/tsdb/head_test.go +++ b/tsdb/head_test.go @@ -2414,8 +2414,7 @@ func TestAddDuplicateLabelName(t *testing.T) { add := func(labels labels.Labels, labelName string) { app := h.Appender(context.Background()) _, err := app.Append(0, labels, 0, 0) - require.Error(t, err) - require.Equal(t, fmt.Sprintf(`label name "%s" is not unique: invalid sample`, labelName), err.Error()) + require.EqualError(t, err, fmt.Sprintf(`label name "%s" is not unique: invalid sample`, labelName)) } add(labels.FromStrings("a", "c", "a", "b"), "a") diff --git a/web/api/v1/api_test.go b/web/api/v1/api_test.go index 8323e143c0..334c41ce86 100644 --- 
a/web/api/v1/api_test.go +++ b/web/api/v1/api_test.go @@ -4176,7 +4176,7 @@ func TestExtractQueryOpts(t *testing.T) { if test.err == nil { require.NoError(t, err) } else { - require.Equal(t, test.err.Error(), err.Error()) + require.EqualError(t, err, test.err.Error()) } }) } From 5a54ae67edc4b140f14df25b3f08e438b5f846a6 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sun, 6 Oct 2024 20:01:57 +0200 Subject: [PATCH 126/137] Bump actions/checkout from 4.1.6 to 4.2.0 (#15064) Bumps [actions/checkout](https://github.com/actions/checkout) from 4.1.6 to 4.2.0. - [Release notes](https://github.com/actions/checkout/releases) - [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md) - [Commits](https://github.com/actions/checkout/compare/a5ac7e51b41094c92402da3b24376905380afc29...d632683dd7b4114ad314bca15554477dd762a938) --- updated-dependencies: - dependency-name: actions/checkout dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/buf-lint.yml | 2 +- .github/workflows/buf.yml | 2 +- .github/workflows/ci.yml | 26 ++++++++++----------- .github/workflows/codeql-analysis.yml | 2 +- .github/workflows/container_description.yml | 4 ++-- .github/workflows/repo_sync.yml | 2 +- .github/workflows/scorecards.yml | 2 +- 7 files changed, 20 insertions(+), 20 deletions(-) diff --git a/.github/workflows/buf-lint.yml b/.github/workflows/buf-lint.yml index 8f932b759b..bf7f681b69 100644 --- a/.github/workflows/buf-lint.yml +++ b/.github/workflows/buf-lint.yml @@ -12,7 +12,7 @@ jobs: name: lint runs-on: ubuntu-latest steps: - - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 + - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 - uses: bufbuild/buf-setup-action@62ee92603c244ad0da98bab36a834a999a5329e6 # v1.43.0 with: github_token: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/buf.yml b/.github/workflows/buf.yml index 1b189926fb..669305ebd3 100644 --- a/.github/workflows/buf.yml +++ b/.github/workflows/buf.yml @@ -12,7 +12,7 @@ jobs: runs-on: ubuntu-latest if: github.repository_owner == 'prometheus' steps: - - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 + - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 - uses: bufbuild/buf-setup-action@62ee92603c244ad0da98bab36a834a999a5329e6 # v1.43.0 with: github_token: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 2ef0e97a10..2714211dd7 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -13,7 +13,7 @@ jobs: # should also be updated. 
image: quay.io/prometheus/golang-builder:1.23-base steps: - - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 + - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 - uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4 - uses: ./.github/promci/actions/setup_environment with: @@ -29,7 +29,7 @@ jobs: container: image: quay.io/prometheus/golang-builder:1.23-base steps: - - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 + - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 - uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4 - uses: ./.github/promci/actions/setup_environment - run: go test --tags=dedupelabels ./... @@ -48,7 +48,7 @@ jobs: # The go version in this image should be N-1 wrt test_go. image: quay.io/prometheus/golang-builder:1.22-base steps: - - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 + - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 - run: make build # Don't run NPM build; don't run race-detector. 
- run: make test GO_ONLY=1 test-flags="" @@ -62,7 +62,7 @@ jobs: image: quay.io/prometheus/golang-builder:1.23-base steps: - - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 + - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 - uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4 - uses: ./.github/promci/actions/setup_environment with: @@ -79,7 +79,7 @@ jobs: name: Go tests on Windows runs-on: windows-latest steps: - - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 + - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 with: go-version: 1.23.x @@ -96,7 +96,7 @@ jobs: container: image: quay.io/prometheus/golang-builder:1.23-base steps: - - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 + - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 - run: go install ./cmd/promtool/. - run: go install github.com/google/go-jsonnet/cmd/jsonnet@latest - run: go install github.com/google/go-jsonnet/cmd/jsonnetfmt@latest @@ -121,7 +121,7 @@ jobs: matrix: thread: [ 0, 1, 2 ] steps: - - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 + - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 - uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4 - uses: ./.github/promci/actions/build with: @@ -146,7 +146,7 @@ jobs: # Whenever the Go version is updated here, .promu.yml # should also be updated. 
steps: - - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 + - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 - uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4 - uses: ./.github/promci/actions/build with: @@ -169,7 +169,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout repository - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 + uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 - name: Install Go uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 with: @@ -182,7 +182,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout repository - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 + uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 - name: Install Go uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 with: @@ -208,7 +208,7 @@ jobs: needs: [test_ui, test_go, test_go_more, test_go_oldest, test_windows, golangci, codeql, build_all] if: github.event_name == 'push' && github.event.ref == 'refs/heads/main' steps: - - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 + - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 - uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4 - uses: ./.github/promci/actions/publish_main with: @@ -225,7 +225,7 @@ jobs: || (github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v3.')) steps: - - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 + - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 - uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4 - uses: ./.github/promci/actions/publish_release with: @@ -240,7 +240,7 @@ jobs: needs: [test_ui, codeql] steps: - name: Checkout - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 + uses: 
actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 - uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4 - name: Install nodejs uses: actions/setup-node@0a44ba7841725637a19e28fa30b79a866c81b0a6 # v4.0.4 diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 1466f4ec2b..77fbd4dafb 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -24,7 +24,7 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 + uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 - name: Initialize CodeQL uses: github/codeql-action/init@e2b3eafc8d227b0241d48be5f425d47c2d750a13 # v3.26.10 diff --git a/.github/workflows/container_description.yml b/.github/workflows/container_description.yml index 8ddbc34aeb..144859486d 100644 --- a/.github/workflows/container_description.yml +++ b/.github/workflows/container_description.yml @@ -18,7 +18,7 @@ jobs: if: github.repository_owner == 'prometheus' || github.repository_owner == 'prometheus-community' # Don't run this workflow on forks. steps: - name: git checkout - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 + uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 - name: Set docker hub repo name run: echo "DOCKER_REPO_NAME=$(make docker-repo-name)" >> $GITHUB_ENV - name: Push README to Dockerhub @@ -40,7 +40,7 @@ jobs: if: github.repository_owner == 'prometheus' || github.repository_owner == 'prometheus-community' # Don't run this workflow on forks. 
steps: - name: git checkout - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 + uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 - name: Set quay.io org name run: echo "DOCKER_REPO=$(echo quay.io/${GITHUB_REPOSITORY_OWNER} | tr -d '-')" >> $GITHUB_ENV - name: Set quay.io repo name diff --git a/.github/workflows/repo_sync.yml b/.github/workflows/repo_sync.yml index 537e9abd84..aa306c46d0 100644 --- a/.github/workflows/repo_sync.yml +++ b/.github/workflows/repo_sync.yml @@ -13,7 +13,7 @@ jobs: container: image: quay.io/prometheus/golang-builder steps: - - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 + - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 - run: ./scripts/sync_repo_files.sh env: GITHUB_TOKEN: ${{ secrets.PROMBOT_GITHUB_TOKEN }} diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index b5fbc7c946..c63727f7f1 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -21,7 +21,7 @@ jobs: steps: - name: "Checkout code" - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # tag=v4.1.6 + uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # tag=v4.2.0 with: persist-credentials: false From 8d281c3491564738aab1f6735eea4542428d999f Mon Sep 17 00:00:00 2001 From: Bartlomiej Plotka Date: Mon, 7 Oct 2024 10:55:26 +0200 Subject: [PATCH 127/137] textparse: Refactored benchmark (#15083) * textparse: Refactored benchmark. Signed-off-by: bwplotka * Kill sinks, tested, nothing is inlined. Signed-off-by: bwplotka * Addressed comments. 
Signed-off-by: bwplotka --------- Signed-off-by: bwplotka --- model/textparse/benchmark_test.go | 177 ++++++++++++++++ model/textparse/openmetricsparse_test.go | 44 ---- model/textparse/promparse_test.go | 191 ------------------ model/textparse/protobufparse_test.go | 4 +- model/textparse/{ => testdata}/omtestdata.txt | 0 .../{ => testdata}/promtestdata.nometa.txt | 0 .../textparse/{ => testdata}/promtestdata.txt | 0 7 files changed, 180 insertions(+), 236 deletions(-) create mode 100644 model/textparse/benchmark_test.go rename model/textparse/{ => testdata}/omtestdata.txt (100%) rename model/textparse/{ => testdata}/promtestdata.nometa.txt (100%) rename model/textparse/{ => testdata}/promtestdata.txt (100%) diff --git a/model/textparse/benchmark_test.go b/model/textparse/benchmark_test.go new file mode 100644 index 0000000000..3b8b8f305e --- /dev/null +++ b/model/textparse/benchmark_test.go @@ -0,0 +1,177 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package textparse + +import ( + "bytes" + "errors" + "fmt" + "io" + "os" + "path/filepath" + "testing" + + "github.com/prometheus/prometheus/model/exemplar" + "github.com/prometheus/prometheus/model/labels" + + "github.com/prometheus/common/expfmt" + "github.com/prometheus/common/model" + "github.com/stretchr/testify/require" +) + +type newParser func([]byte, *labels.SymbolTable) Parser + +var newTestParserFns = map[string]newParser{ + "promtext": NewPromParser, + "promproto": func(b []byte, st *labels.SymbolTable) Parser { + return NewProtobufParser(b, true, st) + }, + "omtext": func(b []byte, st *labels.SymbolTable) Parser { + return NewOpenMetricsParser(b, st, WithOMParserCTSeriesSkipped()) + }, +} + +// BenchmarkParse benchmarks parsing, mimicking how scrape/scrape.go#append use it. +// Typically used as follows: +/* + export bench=v1 && go test ./model/textparse/... \ + -run '^$' -bench '^BenchmarkParse' \ + -benchtime 2s -count 6 -cpu 2 -benchmem -timeout 999m \ + | tee ${bench}.txt +*/ +// For profiles, add -memprofile=${bench}.mem.pprof -cpuprofile=${bench}.cpu.pprof +// options. +// +// NOTE(bwplotka): Previous iterations of this benchmark had different cases for isolated +// Series, Series+Metrics with and without reuse, Series+CT. Those cases are sometimes +// good to know if you are working on a certain optimization, but it does not +// make sense to persist such cases for everybody (e.g. for CI one day). +// For local iteration, feel free to adjust cases/comment out code etc. +// +// NOTE(bwplotka): Do not try to conclude "what parser (OM, proto, prom) is the fastest" +// as the testdata has different amount and type of metrics and features (e.g. exemplars). +func BenchmarkParse(b *testing.B) { + for _, bcase := range []struct { + dataFile string // Localized to "./testdata". 
+ dataProto []byte + parser string + + compareToExpfmtFormat expfmt.FormatType + }{ + {dataFile: "promtestdata.txt", parser: "promtext", compareToExpfmtFormat: expfmt.TypeTextPlain}, + {dataFile: "promtestdata.nometa.txt", parser: "promtext", compareToExpfmtFormat: expfmt.TypeTextPlain}, + + // We don't pass compareToExpfmtFormat: expfmt.TypeProtoDelim as expfmt does not support GAUGE_HISTOGRAM, see https://github.com/prometheus/common/issues/430. + {dataProto: createTestProtoBuf(b).Bytes(), parser: "promproto"}, + + // We don't pass compareToExpfmtFormat: expfmt.TypeOpenMetrics as expfmt does not support OM exemplars, see https://github.com/prometheus/common/issues/703. + {dataFile: "omtestdata.txt", parser: "omtext"}, + {dataFile: "promtestdata.txt", parser: "omtext"}, // Compare how omtext parser deals with Prometheus text format vs promtext. + } { + var buf []byte + dataCase := bcase.dataFile + if len(bcase.dataProto) > 0 { + dataCase = "createTestProtoBuf()" + buf = bcase.dataProto + } else { + f, err := os.Open(filepath.Join("testdata", bcase.dataFile)) + require.NoError(b, err) + b.Cleanup(func() { + _ = f.Close() + }) + buf, err = io.ReadAll(f) + require.NoError(b, err) + } + b.Run(fmt.Sprintf("data=%v/parser=%v", dataCase, bcase.parser), func(b *testing.B) { + newParserFn := newTestParserFns[bcase.parser] + var ( + res labels.Labels + e exemplar.Exemplar + ) + + b.SetBytes(int64(len(buf))) + b.ReportAllocs() + b.ResetTimer() + + st := labels.NewSymbolTable() + for i := 0; i < b.N; i++ { + p := newParserFn(buf, st) + + Inner: + for { + t, err := p.Next() + switch t { + case EntryInvalid: + if errors.Is(err, io.EOF) { + break Inner + } + b.Fatal(err) + case EntryType: + _, _ = p.Type() + continue + case EntryHelp: + _, _ = p.Help() + continue + case EntryUnit: + _, _ = p.Unit() + continue + case EntryComment: + continue + case EntryHistogram: + _, _, _, _ = p.Histogram() + case EntrySeries: + _, _, _ = p.Series() + default: + b.Fatal("not implemented entry", 
t) + } + + _ = p.Metric(&res) + _ = p.CreatedTimestamp() + for hasExemplar := p.Exemplar(&e); hasExemplar; hasExemplar = p.Exemplar(&e) { + } + } + } + }) + + b.Run(fmt.Sprintf("data=%v/parser=xpfmt", dataCase), func(b *testing.B) { + if bcase.compareToExpfmtFormat == expfmt.TypeUnknown { + b.Skip("compareToExpfmtFormat not set") + } + + b.SetBytes(int64(len(buf))) + b.ReportAllocs() + b.ResetTimer() + + for i := 0; i < b.N; i++ { + decSamples := make(model.Vector, 0, 50) + sdec := expfmt.SampleDecoder{ + Dec: expfmt.NewDecoder(bytes.NewReader(buf), expfmt.NewFormat(bcase.compareToExpfmtFormat)), + Opts: &expfmt.DecodeOptions{ + Timestamp: model.TimeFromUnixNano(0), + }, + } + + for { + if err := sdec.Decode(&decSamples); err != nil { + if errors.Is(err, io.EOF) { + break + } + b.Fatal(err) + } + decSamples = decSamples[:0] + } + } + }) + } +} diff --git a/model/textparse/openmetricsparse_test.go b/model/textparse/openmetricsparse_test.go index ea1f2a25f9..1d2e7feb0d 100644 --- a/model/textparse/openmetricsparse_test.go +++ b/model/textparse/openmetricsparse_test.go @@ -16,7 +16,6 @@ package textparse import ( "errors" "io" - "os" "testing" "github.com/prometheus/common/model" @@ -958,46 +957,3 @@ thing_bucket{le="+Inf"} 17` i++ } } - -func BenchmarkOMParseCreatedTimestamp(b *testing.B) { - for parserName, parser := range map[string]func([]byte, *labels.SymbolTable) Parser{ - "openmetrics": func(b []byte, st *labels.SymbolTable) Parser { - return NewOpenMetricsParser(b, st) - }, - "openmetrics-skip-ct": func(b []byte, st *labels.SymbolTable) Parser { - return NewOpenMetricsParser(b, st, WithOMParserCTSeriesSkipped()) - }, - } { - f, err := os.Open("omtestdata.txt") - require.NoError(b, err) - defer f.Close() - - buf, err := io.ReadAll(f) - require.NoError(b, err) - - b.Run(parserName+"/parse-ct/"+"omtestdata.txt", func(b *testing.B) { - b.SetBytes(int64(len(buf) / promtestdataSampleCount)) - b.ReportAllocs() - b.ResetTimer() - - st := labels.NewSymbolTable() - for 
i := 0; i < b.N; i += promtestdataSampleCount { - p := parser(buf, st) - - Outer: - for i < b.N { - t, err := p.Next() - switch t { - case EntryInvalid: - if errors.Is(err, io.EOF) { - break Outer - } - b.Fatal(err) - case EntrySeries: - p.CreatedTimestamp() - } - } - } - }) - } -} diff --git a/model/textparse/promparse_test.go b/model/textparse/promparse_test.go index e0337f8fd9..e700b35275 100644 --- a/model/textparse/promparse_test.go +++ b/model/textparse/promparse_test.go @@ -14,17 +14,13 @@ package textparse import ( - "bytes" "errors" "io" - "os" "strings" "testing" - "github.com/klauspost/compress/gzip" "github.com/stretchr/testify/require" - "github.com/prometheus/common/expfmt" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/exemplar" @@ -485,190 +481,3 @@ func TestPromNullByteHandling(t *testing.T) { require.EqualError(t, err, c.err, "test %d", i) } } - -const ( - promtestdataSampleCount = 410 -) - -func BenchmarkPromParse(b *testing.B) { - for parserName, parser := range map[string]func([]byte, *labels.SymbolTable) Parser{ - "prometheus": NewPromParser, - "openmetrics": func(b []byte, st *labels.SymbolTable) Parser { - return NewOpenMetricsParser(b, st) - }, - } { - for _, fn := range []string{"promtestdata.txt", "promtestdata.nometa.txt"} { - f, err := os.Open(fn) - require.NoError(b, err) - defer f.Close() - - buf, err := io.ReadAll(f) - require.NoError(b, err) - - b.Run(parserName+"/no-decode-metric/"+fn, func(b *testing.B) { - total := 0 - - b.SetBytes(int64(len(buf) / promtestdataSampleCount)) - b.ReportAllocs() - b.ResetTimer() - - st := labels.NewSymbolTable() - for i := 0; i < b.N; i += promtestdataSampleCount { - p := parser(buf, st) - - Outer: - for i < b.N { - t, err := p.Next() - switch t { - case EntryInvalid: - if errors.Is(err, io.EOF) { - break Outer - } - b.Fatal(err) - case EntrySeries: - m, _, _ := p.Series() - total += len(m) - i++ - } - } - } - _ = total - }) - b.Run(parserName+"/decode-metric/"+fn, 
func(b *testing.B) { - total := 0 - - b.SetBytes(int64(len(buf) / promtestdataSampleCount)) - b.ReportAllocs() - b.ResetTimer() - - st := labels.NewSymbolTable() - for i := 0; i < b.N; i += promtestdataSampleCount { - p := parser(buf, st) - - Outer: - for i < b.N { - t, err := p.Next() - switch t { - case EntryInvalid: - if errors.Is(err, io.EOF) { - break Outer - } - b.Fatal(err) - case EntrySeries: - m, _, _ := p.Series() - - var res labels.Labels - p.Metric(&res) - - total += len(m) - i++ - } - } - } - _ = total - }) - b.Run(parserName+"/decode-metric-reuse/"+fn, func(b *testing.B) { - total := 0 - var res labels.Labels - - b.SetBytes(int64(len(buf) / promtestdataSampleCount)) - b.ReportAllocs() - b.ResetTimer() - - st := labels.NewSymbolTable() - for i := 0; i < b.N; i += promtestdataSampleCount { - p := parser(buf, st) - - Outer: - for i < b.N { - t, err := p.Next() - switch t { - case EntryInvalid: - if errors.Is(err, io.EOF) { - break Outer - } - b.Fatal(err) - case EntrySeries: - m, _, _ := p.Series() - - p.Metric(&res) - - total += len(m) - i++ - } - } - } - _ = total - }) - b.Run("expfmt-text/"+fn, func(b *testing.B) { - if parserName != "prometheus" { - b.Skip() - } - b.SetBytes(int64(len(buf) / promtestdataSampleCount)) - b.ReportAllocs() - b.ResetTimer() - - total := 0 - - for i := 0; i < b.N; i += promtestdataSampleCount { - decSamples := make(model.Vector, 0, 50) - sdec := expfmt.SampleDecoder{ - Dec: expfmt.NewDecoder(bytes.NewReader(buf), expfmt.NewFormat(expfmt.TypeTextPlain)), - Opts: &expfmt.DecodeOptions{ - Timestamp: model.TimeFromUnixNano(0), - }, - } - - for { - if err = sdec.Decode(&decSamples); err != nil { - break - } - total += len(decSamples) - decSamples = decSamples[:0] - } - } - _ = total - }) - } - } -} - -func BenchmarkGzip(b *testing.B) { - for _, fn := range []string{"promtestdata.txt", "promtestdata.nometa.txt"} { - b.Run(fn, func(b *testing.B) { - f, err := os.Open(fn) - require.NoError(b, err) - defer f.Close() - - var buf 
bytes.Buffer - gw := gzip.NewWriter(&buf) - - n, err := io.Copy(gw, f) - require.NoError(b, err) - require.NoError(b, gw.Close()) - - gbuf, err := io.ReadAll(&buf) - require.NoError(b, err) - - k := b.N / promtestdataSampleCount - - b.ReportAllocs() - b.SetBytes(n / promtestdataSampleCount) - b.ResetTimer() - - total := 0 - - for i := 0; i < k; i++ { - gr, err := gzip.NewReader(bytes.NewReader(gbuf)) - require.NoError(b, err) - - d, err := io.ReadAll(gr) - require.NoError(b, err) - require.NoError(b, gr.Close()) - - total += len(d) - } - _ = total - }) - } -} diff --git a/model/textparse/protobufparse_test.go b/model/textparse/protobufparse_test.go index cf34ae52df..01c6ac5064 100644 --- a/model/textparse/protobufparse_test.go +++ b/model/textparse/protobufparse_test.go @@ -32,7 +32,9 @@ import ( dto "github.com/prometheus/prometheus/prompb/io/prometheus/client" ) -func createTestProtoBuf(t *testing.T) *bytes.Buffer { +func createTestProtoBuf(t testing.TB) *bytes.Buffer { + t.Helper() + testMetricFamilies := []string{ `name: "go_build_info" help: "Build information about the main Go module." 
diff --git a/model/textparse/omtestdata.txt b/model/textparse/testdata/omtestdata.txt similarity index 100% rename from model/textparse/omtestdata.txt rename to model/textparse/testdata/omtestdata.txt diff --git a/model/textparse/promtestdata.nometa.txt b/model/textparse/testdata/promtestdata.nometa.txt similarity index 100% rename from model/textparse/promtestdata.nometa.txt rename to model/textparse/testdata/promtestdata.nometa.txt diff --git a/model/textparse/promtestdata.txt b/model/textparse/testdata/promtestdata.txt similarity index 100% rename from model/textparse/promtestdata.txt rename to model/textparse/testdata/promtestdata.txt From b062dfa4f35bea3192e01ea566179898a78f4c90 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 7 Oct 2024 09:45:23 +0000 Subject: [PATCH 128/137] build(deps): bump golang.org/x/tools from 0.25.0 to 0.26.0 Bumps [golang.org/x/tools](https://github.com/golang/tools) from 0.25.0 to 0.26.0. - [Release notes](https://github.com/golang/tools/releases) - [Commits](https://github.com/golang/tools/compare/v0.25.0...v0.26.0) --- updated-dependencies: - dependency-name: golang.org/x/tools dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- go.mod | 12 ++++++------ go.sum | 24 ++++++++++++------------ 2 files changed, 18 insertions(+), 18 deletions(-) diff --git a/go.mod b/go.mod index 5169d5f5ae..65fc298a29 100644 --- a/go.mod +++ b/go.mod @@ -77,10 +77,10 @@ require ( go.uber.org/multierr v1.11.0 golang.org/x/oauth2 v0.23.0 golang.org/x/sync v0.8.0 - golang.org/x/sys v0.25.0 - golang.org/x/text v0.18.0 + golang.org/x/sys v0.26.0 + golang.org/x/text v0.19.0 golang.org/x/time v0.6.0 - golang.org/x/tools v0.25.0 + golang.org/x/tools v0.26.0 google.golang.org/api v0.199.0 google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 google.golang.org/grpc v1.67.1 @@ -190,11 +190,11 @@ require ( go.opencensus.io v0.24.0 // indirect go.opentelemetry.io/otel/metric v1.30.0 // indirect go.opentelemetry.io/proto/otlp v1.3.1 // indirect - golang.org/x/crypto v0.27.0 // indirect + golang.org/x/crypto v0.28.0 // indirect golang.org/x/exp v0.0.0-20240119083558-1b970713d09a // indirect golang.org/x/mod v0.21.0 // indirect - golang.org/x/net v0.29.0 // indirect - golang.org/x/term v0.24.0 // indirect + golang.org/x/net v0.30.0 // indirect + golang.org/x/term v0.25.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect diff --git a/go.sum b/go.sum index 614bda1d27..7a426e1a2e 100644 --- a/go.sum +++ b/go.sum @@ -782,8 +782,8 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= -golang.org/x/crypto v0.27.0 h1:GXm2NjJrPaiv/h1tb2UH8QfgC/hOf/+z0p6PT8o1w7A= -golang.org/x/crypto v0.27.0/go.mod 
h1:1Xngt8kV6Dvbssa53Ziq6Eqn0HqbZi5Z6R0ZpwQzt70= +golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw= +golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -865,8 +865,8 @@ golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= -golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo= -golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0= +golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4= +golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -955,16 +955,16 @@ golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= -golang.org/x/sys v0.25.0/go.mod 
h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo= +golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= -golang.org/x/term v0.24.0 h1:Mh5cbb+Zk2hqqXNO7S1iTjEphVL+jb8ZWaqh/g+JWkM= -golang.org/x/term v0.24.0/go.mod h1:lOBK/LVxemqiMij05LGJ0tzNr8xlmwBRJ81PX6wVLH8= +golang.org/x/term v0.25.0 h1:WtHI/ltw4NvSUig5KARz9h521QvRC8RmF/cuYqifU24= +golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -976,8 +976,8 @@ golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224= -golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= +golang.org/x/text v0.19.0/go.mod 
h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -1035,8 +1035,8 @@ golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.25.0 h1:oFU9pkj/iJgs+0DT+VMHrx+oBKs/LJMV+Uvg78sl+fE= -golang.org/x/tools v0.25.0/go.mod h1:/vtpO8WL1N9cQC3FN5zPqb//fRXskFHbLKk4OW1Q7rg= +golang.org/x/tools v0.26.0 h1:v/60pFQmzmT9ExmjDv2gGIfi3OqfKoEP6I5+umXlbnQ= +golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= From f6e110d58881cf42eabce398e031434311717de4 Mon Sep 17 00:00:00 2001 From: Bartlomiej Plotka Date: Mon, 7 Oct 2024 13:17:44 +0200 Subject: [PATCH 129/137] textparse: Refactored main testing utils for reusability; fixed proto Units. 
(#15095) Signed-off-by: bwplotka --- model/textparse/interface.go | 2 + model/textparse/interface_test.go | 97 +++++ model/textparse/openmetricsparse_test.go | 124 +++--- model/textparse/promparse_test.go | 105 +---- model/textparse/protobufparse.go | 6 + model/textparse/protobufparse_test.go | 525 ++++++++++------------- 6 files changed, 382 insertions(+), 477 deletions(-) diff --git a/model/textparse/interface.go b/model/textparse/interface.go index 7de88a4869..3b0e9a96e1 100644 --- a/model/textparse/interface.go +++ b/model/textparse/interface.go @@ -69,6 +69,8 @@ type Parser interface { // CreatedTimestamp returns the created timestamp (in milliseconds) for the // current sample. It returns nil if it is unknown e.g. if it wasn't set, // if the scrape protocol or metric type does not support created timestamps. + // Assume the CreatedTimestamp returned pointer is only valid until + // the Next iteration. CreatedTimestamp() *int64 // Next advances the parser to the next sample. diff --git a/model/textparse/interface_test.go b/model/textparse/interface_test.go index e010cb36ec..3f2f758d7e 100644 --- a/model/textparse/interface_test.go +++ b/model/textparse/interface_test.go @@ -14,11 +14,18 @@ package textparse import ( + "errors" + "io" "testing" + "github.com/google/go-cmp/cmp" + "github.com/prometheus/common/model" "github.com/stretchr/testify/require" + "github.com/prometheus/prometheus/model/exemplar" + "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/util/testutil" ) func TestNewParser(t *testing.T) { @@ -103,3 +110,93 @@ func TestNewParser(t *testing.T) { }) } } + +// parsedEntry represents data that is parsed for each entry. +type parsedEntry struct { + // In all but EntryComment, EntryInvalid. + m string + + // In EntryHistogram. + shs *histogram.Histogram + fhs *histogram.FloatHistogram + + // In EntrySeries. + v float64 + + // In EntrySeries and EntryHistogram. 
+ lset labels.Labels + t *int64 + es []exemplar.Exemplar + ct *int64 + + // In EntryType. + typ model.MetricType + // In EntryHelp. + help string + // In EntryUnit. + unit string + // In EntryComment. + comment string +} + +func requireEntries(t *testing.T, exp, got []parsedEntry) { + t.Helper() + + testutil.RequireEqualWithOptions(t, exp, got, []cmp.Option{ + cmp.AllowUnexported(parsedEntry{}), + }) +} + +func testParse(t *testing.T, p Parser) (ret []parsedEntry) { + t.Helper() + + for { + et, err := p.Next() + if errors.Is(err, io.EOF) { + break + } + require.NoError(t, err) + + var got parsedEntry + var m []byte + switch et { + case EntryInvalid: + t.Fatal("entry invalid not expected") + case EntrySeries, EntryHistogram: + if et == EntrySeries { + m, got.t, got.v = p.Series() + got.m = string(m) + } else { + m, got.t, got.shs, got.fhs = p.Histogram() + got.m = string(m) + } + + p.Metric(&got.lset) + for e := (exemplar.Exemplar{}); p.Exemplar(&e); { + got.es = append(got.es, e) + } + // Parser reuses int pointer. 
+ if ct := p.CreatedTimestamp(); ct != nil { + got.ct = int64p(*ct) + } + case EntryType: + m, got.typ = p.Type() + got.m = string(m) + + case EntryHelp: + m, h := p.Help() + got.m = string(m) + got.help = string(h) + + case EntryUnit: + m, u := p.Unit() + got.m = string(m) + got.unit = string(u) + + case EntryComment: + got.comment = string(p.Comment()) + } + ret = append(ret, got) + } + return ret +} diff --git a/model/textparse/openmetricsparse_test.go b/model/textparse/openmetricsparse_test.go index 1d2e7feb0d..bcb25a253f 100644 --- a/model/textparse/openmetricsparse_test.go +++ b/model/textparse/openmetricsparse_test.go @@ -14,7 +14,6 @@ package textparse import ( - "errors" "io" "testing" @@ -115,7 +114,7 @@ foobar{quantile="0.99"} 150.1` input += "\nnull_byte_metric{a=\"abc\x00\"} 1" input += "\n# EOF\n" - exp := []expectedParse{ + exp := []parsedEntry{ { m: "go_gc_duration_seconds", help: "A summary of the GC invocation durations.", @@ -190,12 +189,16 @@ foobar{quantile="0.99"} 150.1` m: `hhh_bucket{le="+Inf"}`, v: 1, lset: labels.FromStrings("__name__", "hhh_bucket", "le", "+Inf"), - e: &exemplar.Exemplar{Labels: labels.FromStrings("id", "histogram-bucket-test"), Value: 4}, + es: []exemplar.Exemplar{ + {Labels: labels.FromStrings("id", "histogram-bucket-test"), Value: 4}, + }, }, { m: `hhh_count`, v: 1, lset: labels.FromStrings("__name__", "hhh_count"), - e: &exemplar.Exemplar{Labels: labels.FromStrings("id", "histogram-count-test"), Value: 4}, + es: []exemplar.Exemplar{ + {Labels: labels.FromStrings("id", "histogram-count-test"), Value: 4}, + }, }, { m: "ggh", typ: model.MetricTypeGaugeHistogram, @@ -203,12 +206,16 @@ foobar{quantile="0.99"} 150.1` m: `ggh_bucket{le="+Inf"}`, v: 1, lset: labels.FromStrings("__name__", "ggh_bucket", "le", "+Inf"), - e: &exemplar.Exemplar{Labels: labels.FromStrings("id", "gaugehistogram-bucket-test", "xx", "yy"), Value: 4, HasTs: true, Ts: 123123}, + es: []exemplar.Exemplar{ + {Labels: labels.FromStrings("id", 
"gaugehistogram-bucket-test", "xx", "yy"), Value: 4, HasTs: true, Ts: 123123}, + }, }, { m: `ggh_count`, v: 1, lset: labels.FromStrings("__name__", "ggh_count"), - e: &exemplar.Exemplar{Labels: labels.FromStrings("id", "gaugehistogram-count-test", "xx", "yy"), Value: 4, HasTs: true, Ts: 123123}, + es: []exemplar.Exemplar{ + {Labels: labels.FromStrings("id", "gaugehistogram-count-test", "xx", "yy"), Value: 4, HasTs: true, Ts: 123123}, + }, }, { m: "smr_seconds", typ: model.MetricTypeSummary, @@ -216,12 +223,16 @@ foobar{quantile="0.99"} 150.1` m: `smr_seconds_count`, v: 2, lset: labels.FromStrings("__name__", "smr_seconds_count"), - e: &exemplar.Exemplar{Labels: labels.FromStrings("id", "summary-count-test"), Value: 1, HasTs: true, Ts: 123321}, + es: []exemplar.Exemplar{ + {Labels: labels.FromStrings("id", "summary-count-test"), Value: 1, HasTs: true, Ts: 123321}, + }, }, { m: `smr_seconds_sum`, v: 42, lset: labels.FromStrings("__name__", "smr_seconds_sum"), - e: &exemplar.Exemplar{Labels: labels.FromStrings("id", "summary-sum-test"), Value: 1, HasTs: true, Ts: 123321}, + es: []exemplar.Exemplar{ + {Labels: labels.FromStrings("id", "summary-sum-test"), Value: 1, HasTs: true, Ts: 123321}, + }, }, { m: "ii", typ: model.MetricTypeInfo, @@ -270,15 +281,19 @@ foobar{quantile="0.99"} 150.1` v: 17, lset: labels.FromStrings("__name__", "foo_total"), t: int64p(1520879607789), - e: &exemplar.Exemplar{Labels: labels.FromStrings("id", "counter-test"), Value: 5}, - ct: int64p(1520872607123), + es: []exemplar.Exemplar{ + {Labels: labels.FromStrings("id", "counter-test"), Value: 5}, + }, + ct: int64p(1520872607123), }, { m: `foo_total{a="b"}`, v: 17.0, lset: labels.FromStrings("__name__", "foo_total", "a", "b"), t: int64p(1520879607789), - e: &exemplar.Exemplar{Labels: labels.FromStrings("id", "counter-test"), Value: 5}, - ct: int64p(1520872607123), + es: []exemplar.Exemplar{ + {Labels: labels.FromStrings("id", "counter-test"), Value: 5}, + }, + ct: int64p(1520872607123), }, { m: 
"bar", help: "Summary with CT at the end, making sure we find CT even if it's multiple lines a far", @@ -430,7 +445,8 @@ foobar{quantile="0.99"} 150.1` } p := NewOpenMetricsParser([]byte(input), labels.NewSymbolTable(), WithOMParserCTSeriesSkipped()) - checkParseResultsWithCT(t, p, exp, true) + got := testParse(t, p) + requireEntries(t, exp, got) } func TestUTF8OpenMetricsParse(t *testing.T) { @@ -455,7 +471,7 @@ func TestUTF8OpenMetricsParse(t *testing.T) { input += "\n# EOF\n" - exp := []expectedParse{ + exp := []parsedEntry{ { m: "go.gc_duration_seconds", help: "A summary of the GC invocation durations.", @@ -504,7 +520,8 @@ choices}`, "strange©™\n'quoted' \"name\"", "6"), } p := NewOpenMetricsParser([]byte(input), labels.NewSymbolTable(), WithOMParserCTSeriesSkipped()) - checkParseResultsWithCT(t, p, exp, true) + got := testParse(t, p) + requireEntries(t, exp, got) } func TestOpenMetricsParseErrors(t *testing.T) { @@ -878,8 +895,8 @@ func TestOMNullByteHandling(t *testing.T) { } } -// While not desirable, there are cases were CT fails to parse and -// these tests show them. +// TestCTParseFailures tests known failure edge cases, we know does not work due +// current OM spec limitations or clients with broken OM format. // TODO(maniktherana): Make sure OM 1.1/2.0 pass CT via metadata or exemplar-like to avoid this. 
func TestCTParseFailures(t *testing.T) { input := `# HELP thing Histogram with _created as first line @@ -892,68 +909,37 @@ thing_bucket{le="+Inf"} 17` input += "\n# EOF\n" - int64p := func(x int64) *int64 { return &x } - - type expectCT struct { - m string - ct *int64 - typ model.MetricType - help string - isErr bool - } - - exp := []expectCT{ + exp := []parsedEntry{ { - m: "thing", - help: "Histogram with _created as first line", - isErr: false, + m: "thing", + help: "Histogram with _created as first line", }, { - m: "thing", - typ: model.MetricTypeHistogram, - isErr: false, + m: "thing", + typ: model.MetricTypeHistogram, }, { - m: `thing_count`, - ct: int64p(1520872607123), - isErr: true, + m: `thing_count`, + ct: nil, // Should be int64p(1520872607123). }, { - m: `thing_sum`, - ct: int64p(1520872607123), - isErr: true, + m: `thing_sum`, + ct: nil, // Should be int64p(1520872607123). }, { - m: `thing_bucket{le="0.0"}`, - ct: int64p(1520872607123), - isErr: true, + m: `thing_bucket{le="0.0"}`, + ct: nil, // Should be int64p(1520872607123). }, { - m: `thing_bucket{le="+Inf"}`, - ct: int64p(1520872607123), - isErr: true, + m: `thing_bucket{le="+Inf"}`, + ct: nil, // Should be int64p(1520872607123), }, } p := NewOpenMetricsParser([]byte(input), labels.NewSymbolTable(), WithOMParserCTSeriesSkipped()) - i := 0 + got := testParse(t, p) + resetValAndLset(got) // Keep this test focused on metric, basic entries and CT only. 
+ requireEntries(t, exp, got) +} - var res labels.Labels - for { - et, err := p.Next() - if errors.Is(err, io.EOF) { - break - } - require.NoError(t, err) - - switch et { - case EntrySeries: - p.Metric(&res) - - if ct := p.CreatedTimestamp(); exp[i].isErr { - require.Nil(t, ct) - } else { - require.Equal(t, *exp[i].ct, *ct) - } - default: - i++ - continue - } - i++ +func resetValAndLset(e []parsedEntry) { + for i := range e { + e[i].v = 0 + e[i].lset = labels.EmptyLabels() } } diff --git a/model/textparse/promparse_test.go b/model/textparse/promparse_test.go index e700b35275..b726d8847a 100644 --- a/model/textparse/promparse_test.go +++ b/model/textparse/promparse_test.go @@ -14,33 +14,15 @@ package textparse import ( - "errors" "io" - "strings" "testing" + "github.com/prometheus/common/model" "github.com/stretchr/testify/require" - "github.com/prometheus/common/model" - - "github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/model/labels" - "github.com/prometheus/prometheus/util/testutil" ) -type expectedParse struct { - lset labels.Labels - m string - t *int64 - v float64 - typ model.MetricType - help string - unit string - comment string - e *exemplar.Exemplar - ct *int64 -} - func TestPromParse(t *testing.T) { input := `# HELP go_gc_duration_seconds A summary of the GC invocation durations. 
# TYPE go_gc_duration_seconds summary @@ -72,9 +54,7 @@ testmetric{label="\"bar\""} 1` input += "\n# HELP metric foo\x00bar" input += "\nnull_byte_metric{a=\"abc\x00\"} 1" - int64p := func(x int64) *int64 { return &x } - - exp := []expectedParse{ + exp := []parsedEntry{ { m: "go_gc_duration_seconds", help: "A summary of the GC invocation durations.", @@ -182,80 +162,8 @@ testmetric{label="\"bar\""} 1` } p := NewPromParser([]byte(input), labels.NewSymbolTable()) - checkParseResults(t, p, exp) -} - -func checkParseResults(t *testing.T, p Parser, exp []expectedParse) { - checkParseResultsWithCT(t, p, exp, false) -} - -func checkParseResultsWithCT(t *testing.T, p Parser, exp []expectedParse, ctLinesRemoved bool) { - i := 0 - - var res labels.Labels - - for { - et, err := p.Next() - if errors.Is(err, io.EOF) { - break - } - require.NoError(t, err) - - switch et { - case EntrySeries: - m, ts, v := p.Series() - - p.Metric(&res) - - if ctLinesRemoved { - // Are CT series skipped? - _, typ := p.Type() - if typeRequiresCT(typ) && strings.HasSuffix(res.Get(labels.MetricName), "_created") { - t.Fatalf("we exped created lines skipped") - } - } - - require.Equal(t, exp[i].m, string(m)) - require.Equal(t, exp[i].t, ts) - require.Equal(t, exp[i].v, v) - testutil.RequireEqual(t, exp[i].lset, res) - - var e exemplar.Exemplar - found := p.Exemplar(&e) - if exp[i].e == nil { - require.False(t, found) - } else { - require.True(t, found) - testutil.RequireEqual(t, *exp[i].e, e) - } - if ct := p.CreatedTimestamp(); ct != nil { - require.Equal(t, *exp[i].ct, *ct) - } else { - require.Nil(t, exp[i].ct) - } - - case EntryType: - m, typ := p.Type() - require.Equal(t, exp[i].m, string(m)) - require.Equal(t, exp[i].typ, typ) - - case EntryHelp: - m, h := p.Help() - require.Equal(t, exp[i].m, string(m)) - require.Equal(t, exp[i].help, string(h)) - - case EntryUnit: - m, u := p.Unit() - require.Equal(t, exp[i].m, string(m)) - require.Equal(t, exp[i].unit, string(u)) - - case EntryComment: - 
require.Equal(t, exp[i].comment, string(p.Comment())) - } - - i++ - } - require.Len(t, exp, i) + got := testParse(t, p) + requireEntries(t, exp, got) } func TestUTF8PromParse(t *testing.T) { @@ -279,7 +187,7 @@ func TestUTF8PromParse(t *testing.T) { {"go.gc_duration_seconds_count"} 99 {"Heizölrückstoßabdämpfung 10€ metric with \"interesting\" {character\nchoices}","strange©™\n'quoted' \"name\""="6"} 10.0` - exp := []expectedParse{ + exp := []parsedEntry{ { m: "go.gc_duration_seconds", help: "A summary of the GC invocation durations.", @@ -335,7 +243,8 @@ choices}`, "strange©™\n'quoted' \"name\"", "6"), } p := NewPromParser([]byte(input), labels.NewSymbolTable()) - checkParseResults(t, p, exp) + got := testParse(t, p) + requireEntries(t, exp, got) } func TestPromParseErrors(t *testing.T) { diff --git a/model/textparse/protobufparse.go b/model/textparse/protobufparse.go index 9f1400cee3..b3dfdfca1c 100644 --- a/model/textparse/protobufparse.go +++ b/model/textparse/protobufparse.go @@ -457,6 +457,12 @@ func (p *ProtobufParser) Next() (Entry, error) { p.state = EntryHelp case EntryHelp: + if p.mf.Unit != "" { + p.state = EntryUnit + } else { + p.state = EntryType + } + case EntryUnit: p.state = EntryType case EntryType: t := p.mf.GetType() diff --git a/model/textparse/protobufparse_test.go b/model/textparse/protobufparse_test.go index 01c6ac5064..0c09279fed 100644 --- a/model/textparse/protobufparse_test.go +++ b/model/textparse/protobufparse_test.go @@ -16,8 +16,6 @@ package textparse import ( "bytes" "encoding/binary" - "errors" - "io" "testing" "github.com/gogo/protobuf/proto" @@ -27,8 +25,6 @@ import ( "github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" - "github.com/prometheus/prometheus/util/testutil" - dto "github.com/prometheus/prometheus/prompb/io/prometheus/client" ) @@ -785,32 +781,17 @@ metric: < } func TestProtobufParse(t *testing.T) { - type parseResult 
struct { - lset labels.Labels - m string - t int64 - v float64 - typ model.MetricType - help string - unit string - comment string - shs *histogram.Histogram - fhs *histogram.FloatHistogram - e []exemplar.Exemplar - ct int64 - } - inputBuf := createTestProtoBuf(t) scenarios := []struct { name string parser Parser - expected []parseResult + expected []parsedEntry }{ { name: "ignore classic buckets of native histograms", parser: NewProtobufParser(inputBuf.Bytes(), false, labels.NewSymbolTable()), - expected: []parseResult{ + expected: []parsedEntry{ { m: "go_build_info", help: "Build information about the main Go module.", @@ -832,6 +813,9 @@ func TestProtobufParse(t *testing.T) { { m: "go_memstats_alloc_bytes_total", help: "Total number of bytes allocated, even if freed.", + }, + { + m: "go_memstats_alloc_bytes_total", unit: "bytes", }, { @@ -844,7 +828,7 @@ func TestProtobufParse(t *testing.T) { lset: labels.FromStrings( "__name__", "go_memstats_alloc_bytes_total", ), - e: []exemplar.Exemplar{ + es: []exemplar.Exemplar{ {Labels: labels.FromStrings("dummyID", "42"), Value: 12, HasTs: true, Ts: 1625851151233}, }, }, @@ -858,7 +842,7 @@ func TestProtobufParse(t *testing.T) { }, { m: "something_untyped", - t: 1234567, + t: int64p(1234567), v: 42, lset: labels.FromStrings( "__name__", "something_untyped", @@ -874,7 +858,7 @@ func TestProtobufParse(t *testing.T) { }, { m: "test_histogram", - t: 1234568, + t: int64p(1234568), shs: &histogram.Histogram{ Count: 175, ZeroCount: 2, @@ -895,7 +879,7 @@ func TestProtobufParse(t *testing.T) { lset: labels.FromStrings( "__name__", "test_histogram", ), - e: []exemplar.Exemplar{ + es: []exemplar.Exemplar{ {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, }, }, @@ -909,7 +893,7 @@ func TestProtobufParse(t *testing.T) { }, { m: "test_gauge_histogram", - t: 1234568, + t: int64p(1234568), shs: &histogram.Histogram{ CounterResetHint: histogram.GaugeType, Count: 175, @@ -931,7 +915,7 @@ 
func TestProtobufParse(t *testing.T) { lset: labels.FromStrings( "__name__", "test_gauge_histogram", ), - e: []exemplar.Exemplar{ + es: []exemplar.Exemplar{ {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, }, }, @@ -945,7 +929,7 @@ func TestProtobufParse(t *testing.T) { }, { m: "test_float_histogram", - t: 1234568, + t: int64p(1234568), fhs: &histogram.FloatHistogram{ Count: 175.0, ZeroCount: 2.0, @@ -966,7 +950,7 @@ func TestProtobufParse(t *testing.T) { lset: labels.FromStrings( "__name__", "test_float_histogram", ), - e: []exemplar.Exemplar{ + es: []exemplar.Exemplar{ {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, }, }, @@ -980,7 +964,7 @@ func TestProtobufParse(t *testing.T) { }, { m: "test_gauge_float_histogram", - t: 1234568, + t: int64p(1234568), fhs: &histogram.FloatHistogram{ CounterResetHint: histogram.GaugeType, Count: 175.0, @@ -1002,7 +986,7 @@ func TestProtobufParse(t *testing.T) { lset: labels.FromStrings( "__name__", "test_gauge_float_histogram", ), - e: []exemplar.Exemplar{ + es: []exemplar.Exemplar{ {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, }, }, @@ -1043,7 +1027,7 @@ func TestProtobufParse(t *testing.T) { "__name__", "test_histogram2_bucket", "le", "-0.00038", ), - e: []exemplar.Exemplar{ + es: []exemplar.Exemplar{ {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00038, HasTs: true, Ts: 1625851153146}, }, }, @@ -1054,7 +1038,7 @@ func TestProtobufParse(t *testing.T) { "__name__", "test_histogram2_bucket", "le", "1.0", ), - e: []exemplar.Exemplar{ + es: []exemplar.Exemplar{ {Labels: labels.FromStrings("dummyID", "5617"), Value: -0.000295, HasTs: false}, }, }, @@ -1237,7 +1221,7 @@ func TestProtobufParse(t *testing.T) { { m: "test_counter_with_createdtimestamp", v: 42, - ct: 1000, + ct: int64p(1000), lset: labels.FromStrings( "__name__", "test_counter_with_createdtimestamp", ), @@ 
-1253,7 +1237,7 @@ func TestProtobufParse(t *testing.T) { { m: "test_summary_with_createdtimestamp_count", v: 42, - ct: 1000, + ct: int64p(1000), lset: labels.FromStrings( "__name__", "test_summary_with_createdtimestamp_count", ), @@ -1261,7 +1245,7 @@ func TestProtobufParse(t *testing.T) { { m: "test_summary_with_createdtimestamp_sum", v: 1.234, - ct: 1000, + ct: int64p(1000), lset: labels.FromStrings( "__name__", "test_summary_with_createdtimestamp_sum", ), @@ -1276,7 +1260,7 @@ func TestProtobufParse(t *testing.T) { }, { m: "test_histogram_with_createdtimestamp", - ct: 1000, + ct: int64p(1000), shs: &histogram.Histogram{ CounterResetHint: histogram.UnknownCounterReset, PositiveSpans: []histogram.Span{}, @@ -1296,7 +1280,7 @@ func TestProtobufParse(t *testing.T) { }, { m: "test_gaugehistogram_with_createdtimestamp", - ct: 1000, + ct: int64p(1000), shs: &histogram.Histogram{ CounterResetHint: histogram.GaugeType, PositiveSpans: []histogram.Span{}, @@ -1316,7 +1300,7 @@ func TestProtobufParse(t *testing.T) { }, { m: "test_histogram_with_native_histogram_exemplars", - t: 1234568, + t: int64p(1234568), shs: &histogram.Histogram{ Count: 175, ZeroCount: 2, @@ -1337,7 +1321,7 @@ func TestProtobufParse(t *testing.T) { lset: labels.FromStrings( "__name__", "test_histogram_with_native_histogram_exemplars", ), - e: []exemplar.Exemplar{ + es: []exemplar.Exemplar{ {Labels: labels.FromStrings("dummyID", "59780"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, {Labels: labels.FromStrings("dummyID", "59772"), Value: -0.00052, HasTs: true, Ts: 1625851160156}, }, @@ -1352,7 +1336,7 @@ func TestProtobufParse(t *testing.T) { }, { m: "test_histogram_with_native_histogram_exemplars2", - t: 1234568, + t: int64p(1234568), shs: &histogram.Histogram{ Count: 175, ZeroCount: 2, @@ -1373,7 +1357,7 @@ func TestProtobufParse(t *testing.T) { lset: labels.FromStrings( "__name__", "test_histogram_with_native_histogram_exemplars2", ), - e: []exemplar.Exemplar{ + es: []exemplar.Exemplar{ 
{Labels: labels.FromStrings("dummyID", "59780"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, }, }, @@ -1382,16 +1366,16 @@ func TestProtobufParse(t *testing.T) { { name: "parse classic and native buckets", parser: NewProtobufParser(inputBuf.Bytes(), true, labels.NewSymbolTable()), - expected: []parseResult{ - { // 0 + expected: []parsedEntry{ + { m: "go_build_info", help: "Build information about the main Go module.", }, - { // 1 + { m: "go_build_info", typ: model.MetricTypeGauge, }, - { // 2 + { m: "go_build_info\xFFchecksum\xFF\xFFpath\xFFgithub.com/prometheus/client_golang\xFFversion\xFF(devel)", v: 1, lset: labels.FromStrings( @@ -1401,51 +1385,55 @@ func TestProtobufParse(t *testing.T) { "version", "(devel)", ), }, - { // 3 + { m: "go_memstats_alloc_bytes_total", help: "Total number of bytes allocated, even if freed.", }, - { // 4 + { + m: "go_memstats_alloc_bytes_total", + unit: "bytes", + }, + { m: "go_memstats_alloc_bytes_total", typ: model.MetricTypeCounter, }, - { // 5 + { m: "go_memstats_alloc_bytes_total", v: 1.546544e+06, lset: labels.FromStrings( "__name__", "go_memstats_alloc_bytes_total", ), - e: []exemplar.Exemplar{ + es: []exemplar.Exemplar{ {Labels: labels.FromStrings("dummyID", "42"), Value: 12, HasTs: true, Ts: 1625851151233}, }, }, - { // 6 + { m: "something_untyped", help: "Just to test the untyped type.", }, - { // 7 + { m: "something_untyped", typ: model.MetricTypeUnknown, }, - { // 8 + { m: "something_untyped", - t: 1234567, + t: int64p(1234567), v: 42, lset: labels.FromStrings( "__name__", "something_untyped", ), }, - { // 9 + { m: "test_histogram", help: "Test histogram with many buckets removed to keep it manageable in size.", }, - { // 10 + { m: "test_histogram", typ: model.MetricTypeHistogram, }, - { // 11 + { m: "test_histogram", - t: 1234568, + t: int64p(1234568), shs: &histogram.Histogram{ Count: 175, ZeroCount: 2, @@ -1466,79 +1454,79 @@ func TestProtobufParse(t *testing.T) { lset: labels.FromStrings( "__name__", 
"test_histogram", ), - e: []exemplar.Exemplar{ + es: []exemplar.Exemplar{ {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, }, }, - { // 12 + { m: "test_histogram_count", - t: 1234568, + t: int64p(1234568), v: 175, lset: labels.FromStrings( "__name__", "test_histogram_count", ), }, - { // 13 + { m: "test_histogram_sum", - t: 1234568, + t: int64p(1234568), v: 0.0008280461746287094, lset: labels.FromStrings( "__name__", "test_histogram_sum", ), }, - { // 14 + { m: "test_histogram_bucket\xffle\xff-0.0004899999999999998", - t: 1234568, + t: int64p(1234568), v: 2, lset: labels.FromStrings( "__name__", "test_histogram_bucket", "le", "-0.0004899999999999998", ), }, - { // 15 + { m: "test_histogram_bucket\xffle\xff-0.0003899999999999998", - t: 1234568, + t: int64p(1234568), v: 4, lset: labels.FromStrings( "__name__", "test_histogram_bucket", "le", "-0.0003899999999999998", ), - e: []exemplar.Exemplar{ + es: []exemplar.Exemplar{ {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, }, }, - { // 16 + { m: "test_histogram_bucket\xffle\xff-0.0002899999999999998", - t: 1234568, + t: int64p(1234568), v: 16, lset: labels.FromStrings( "__name__", "test_histogram_bucket", "le", "-0.0002899999999999998", ), - e: []exemplar.Exemplar{ + es: []exemplar.Exemplar{ {Labels: labels.FromStrings("dummyID", "5617"), Value: -0.00029, HasTs: false}, }, }, - { // 17 + { m: "test_histogram_bucket\xffle\xff+Inf", - t: 1234568, + t: int64p(1234568), v: 175, lset: labels.FromStrings( "__name__", "test_histogram_bucket", "le", "+Inf", ), }, - { // 18 + { m: "test_gauge_histogram", help: "Like test_histogram but as gauge histogram.", }, - { // 19 + { m: "test_gauge_histogram", typ: model.MetricTypeGaugeHistogram, }, - { // 20 + { m: "test_gauge_histogram", - t: 1234568, + t: int64p(1234568), shs: &histogram.Histogram{ CounterResetHint: histogram.GaugeType, Count: 175, @@ -1560,79 +1548,79 @@ func 
TestProtobufParse(t *testing.T) { lset: labels.FromStrings( "__name__", "test_gauge_histogram", ), - e: []exemplar.Exemplar{ + es: []exemplar.Exemplar{ {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, }, }, - { // 21 + { m: "test_gauge_histogram_count", - t: 1234568, + t: int64p(1234568), v: 175, lset: labels.FromStrings( "__name__", "test_gauge_histogram_count", ), }, - { // 22 + { m: "test_gauge_histogram_sum", - t: 1234568, + t: int64p(1234568), v: 0.0008280461746287094, lset: labels.FromStrings( "__name__", "test_gauge_histogram_sum", ), }, - { // 23 + { m: "test_gauge_histogram_bucket\xffle\xff-0.0004899999999999998", - t: 1234568, + t: int64p(1234568), v: 2, lset: labels.FromStrings( "__name__", "test_gauge_histogram_bucket", "le", "-0.0004899999999999998", ), }, - { // 24 + { m: "test_gauge_histogram_bucket\xffle\xff-0.0003899999999999998", - t: 1234568, + t: int64p(1234568), v: 4, lset: labels.FromStrings( "__name__", "test_gauge_histogram_bucket", "le", "-0.0003899999999999998", ), - e: []exemplar.Exemplar{ + es: []exemplar.Exemplar{ {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, }, }, - { // 25 + { m: "test_gauge_histogram_bucket\xffle\xff-0.0002899999999999998", - t: 1234568, + t: int64p(1234568), v: 16, lset: labels.FromStrings( "__name__", "test_gauge_histogram_bucket", "le", "-0.0002899999999999998", ), - e: []exemplar.Exemplar{ + es: []exemplar.Exemplar{ {Labels: labels.FromStrings("dummyID", "5617"), Value: -0.00029, HasTs: false}, }, }, - { // 26 + { m: "test_gauge_histogram_bucket\xffle\xff+Inf", - t: 1234568, + t: int64p(1234568), v: 175, lset: labels.FromStrings( "__name__", "test_gauge_histogram_bucket", "le", "+Inf", ), }, - { // 27 + { m: "test_float_histogram", help: "Test float histogram with many buckets removed to keep it manageable in size.", }, - { // 28 + { m: "test_float_histogram", typ: model.MetricTypeHistogram, }, - { // 29 + { m: 
"test_float_histogram", - t: 1234568, + t: int64p(1234568), fhs: &histogram.FloatHistogram{ Count: 175.0, ZeroCount: 2.0, @@ -1653,79 +1641,79 @@ func TestProtobufParse(t *testing.T) { lset: labels.FromStrings( "__name__", "test_float_histogram", ), - e: []exemplar.Exemplar{ + es: []exemplar.Exemplar{ {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, }, }, - { // 30 + { m: "test_float_histogram_count", - t: 1234568, + t: int64p(1234568), v: 175, lset: labels.FromStrings( "__name__", "test_float_histogram_count", ), }, - { // 31 + { m: "test_float_histogram_sum", - t: 1234568, + t: int64p(1234568), v: 0.0008280461746287094, lset: labels.FromStrings( "__name__", "test_float_histogram_sum", ), }, - { // 32 + { m: "test_float_histogram_bucket\xffle\xff-0.0004899999999999998", - t: 1234568, + t: int64p(1234568), v: 2, lset: labels.FromStrings( "__name__", "test_float_histogram_bucket", "le", "-0.0004899999999999998", ), }, - { // 33 + { m: "test_float_histogram_bucket\xffle\xff-0.0003899999999999998", - t: 1234568, + t: int64p(1234568), v: 4, lset: labels.FromStrings( "__name__", "test_float_histogram_bucket", "le", "-0.0003899999999999998", ), - e: []exemplar.Exemplar{ + es: []exemplar.Exemplar{ {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, }, }, - { // 34 + { m: "test_float_histogram_bucket\xffle\xff-0.0002899999999999998", - t: 1234568, + t: int64p(1234568), v: 16, lset: labels.FromStrings( "__name__", "test_float_histogram_bucket", "le", "-0.0002899999999999998", ), - e: []exemplar.Exemplar{ + es: []exemplar.Exemplar{ {Labels: labels.FromStrings("dummyID", "5617"), Value: -0.00029, HasTs: false}, }, }, - { // 35 + { m: "test_float_histogram_bucket\xffle\xff+Inf", - t: 1234568, + t: int64p(1234568), v: 175, lset: labels.FromStrings( "__name__", "test_float_histogram_bucket", "le", "+Inf", ), }, - { // 36 + { m: "test_gauge_float_histogram", help: "Like 
test_float_histogram but as gauge histogram.", }, - { // 37 + { m: "test_gauge_float_histogram", typ: model.MetricTypeGaugeHistogram, }, - { // 38 + { m: "test_gauge_float_histogram", - t: 1234568, + t: int64p(1234568), fhs: &histogram.FloatHistogram{ CounterResetHint: histogram.GaugeType, Count: 175.0, @@ -1747,91 +1735,91 @@ func TestProtobufParse(t *testing.T) { lset: labels.FromStrings( "__name__", "test_gauge_float_histogram", ), - e: []exemplar.Exemplar{ + es: []exemplar.Exemplar{ {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, }, }, - { // 39 + { m: "test_gauge_float_histogram_count", - t: 1234568, + t: int64p(1234568), v: 175, lset: labels.FromStrings( "__name__", "test_gauge_float_histogram_count", ), }, - { // 40 + { m: "test_gauge_float_histogram_sum", - t: 1234568, + t: int64p(1234568), v: 0.0008280461746287094, lset: labels.FromStrings( "__name__", "test_gauge_float_histogram_sum", ), }, - { // 41 + { m: "test_gauge_float_histogram_bucket\xffle\xff-0.0004899999999999998", - t: 1234568, + t: int64p(1234568), v: 2, lset: labels.FromStrings( "__name__", "test_gauge_float_histogram_bucket", "le", "-0.0004899999999999998", ), }, - { // 42 + { m: "test_gauge_float_histogram_bucket\xffle\xff-0.0003899999999999998", - t: 1234568, + t: int64p(1234568), v: 4, lset: labels.FromStrings( "__name__", "test_gauge_float_histogram_bucket", "le", "-0.0003899999999999998", ), - e: []exemplar.Exemplar{ + es: []exemplar.Exemplar{ {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, }, }, - { // 43 + { m: "test_gauge_float_histogram_bucket\xffle\xff-0.0002899999999999998", - t: 1234568, + t: int64p(1234568), v: 16, lset: labels.FromStrings( "__name__", "test_gauge_float_histogram_bucket", "le", "-0.0002899999999999998", ), - e: []exemplar.Exemplar{ + es: []exemplar.Exemplar{ {Labels: labels.FromStrings("dummyID", "5617"), Value: -0.00029, HasTs: false}, }, }, - { // 44 + { m: 
"test_gauge_float_histogram_bucket\xffle\xff+Inf", - t: 1234568, + t: int64p(1234568), v: 175, lset: labels.FromStrings( "__name__", "test_gauge_float_histogram_bucket", "le", "+Inf", ), }, - { // 45 + { m: "test_histogram2", help: "Similar histogram as before but now without sparse buckets.", }, - { // 46 + { m: "test_histogram2", typ: model.MetricTypeHistogram, }, - { // 47 + { m: "test_histogram2_count", v: 175, lset: labels.FromStrings( "__name__", "test_histogram2_count", ), }, - { // 48 + { m: "test_histogram2_sum", v: 0.000828, lset: labels.FromStrings( "__name__", "test_histogram2_sum", ), }, - { // 49 + { m: "test_histogram2_bucket\xffle\xff-0.00048", v: 2, lset: labels.FromStrings( @@ -1839,29 +1827,29 @@ func TestProtobufParse(t *testing.T) { "le", "-0.00048", ), }, - { // 50 + { m: "test_histogram2_bucket\xffle\xff-0.00038", v: 4, lset: labels.FromStrings( "__name__", "test_histogram2_bucket", "le", "-0.00038", ), - e: []exemplar.Exemplar{ + es: []exemplar.Exemplar{ {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00038, HasTs: true, Ts: 1625851153146}, }, }, - { // 51 + { m: "test_histogram2_bucket\xffle\xff1.0", v: 16, lset: labels.FromStrings( "__name__", "test_histogram2_bucket", "le", "1.0", ), - e: []exemplar.Exemplar{ + es: []exemplar.Exemplar{ {Labels: labels.FromStrings("dummyID", "5617"), Value: -0.000295, HasTs: false}, }, }, - { // 52 + { m: "test_histogram2_bucket\xffle\xff+Inf", v: 175, lset: labels.FromStrings( @@ -1869,15 +1857,15 @@ func TestProtobufParse(t *testing.T) { "le", "+Inf", ), }, - { // 53 + { m: "test_histogram_family", help: "Test histogram metric family with two very simple histograms.", }, - { // 54 + { m: "test_histogram_family", typ: model.MetricTypeHistogram, }, - { // 55 + { m: "test_histogram_family\xfffoo\xffbar", shs: &histogram.Histogram{ CounterResetHint: histogram.UnknownCounterReset, @@ -1895,7 +1883,7 @@ func TestProtobufParse(t *testing.T) { "foo", "bar", ), }, - { // 56 + { m: 
"test_histogram_family_count\xfffoo\xffbar", v: 5, lset: labels.FromStrings( @@ -1903,7 +1891,7 @@ func TestProtobufParse(t *testing.T) { "foo", "bar", ), }, - { // 57 + { m: "test_histogram_family_sum\xfffoo\xffbar", v: 12.1, lset: labels.FromStrings( @@ -1911,7 +1899,7 @@ func TestProtobufParse(t *testing.T) { "foo", "bar", ), }, - { // 58 + { m: "test_histogram_family_bucket\xfffoo\xffbar\xffle\xff1.1", v: 2, lset: labels.FromStrings( @@ -1920,7 +1908,7 @@ func TestProtobufParse(t *testing.T) { "le", "1.1", ), }, - { // 59 + { m: "test_histogram_family_bucket\xfffoo\xffbar\xffle\xff2.2", v: 3, lset: labels.FromStrings( @@ -1929,7 +1917,7 @@ func TestProtobufParse(t *testing.T) { "le", "2.2", ), }, - { // 60 + { m: "test_histogram_family_bucket\xfffoo\xffbar\xffle\xff+Inf", v: 5, lset: labels.FromStrings( @@ -1938,7 +1926,7 @@ func TestProtobufParse(t *testing.T) { "le", "+Inf", ), }, - { // 61 + { m: "test_histogram_family\xfffoo\xffbaz", shs: &histogram.Histogram{ CounterResetHint: histogram.UnknownCounterReset, @@ -1956,7 +1944,7 @@ func TestProtobufParse(t *testing.T) { "foo", "baz", ), }, - { // 62 + { m: "test_histogram_family_count\xfffoo\xffbaz", v: 6, lset: labels.FromStrings( @@ -1964,7 +1952,7 @@ func TestProtobufParse(t *testing.T) { "foo", "baz", ), }, - { // 63 + { m: "test_histogram_family_sum\xfffoo\xffbaz", v: 13.1, lset: labels.FromStrings( @@ -1972,7 +1960,7 @@ func TestProtobufParse(t *testing.T) { "foo", "baz", ), }, - { // 64 + { m: "test_histogram_family_bucket\xfffoo\xffbaz\xffle\xff1.1", v: 1, lset: labels.FromStrings( @@ -1981,7 +1969,7 @@ func TestProtobufParse(t *testing.T) { "le", "1.1", ), }, - { // 65 + { m: "test_histogram_family_bucket\xfffoo\xffbaz\xffle\xff2.2", v: 5, lset: labels.FromStrings( @@ -1990,7 +1978,7 @@ func TestProtobufParse(t *testing.T) { "le", "2.2", ), }, - { // 66 + { m: "test_histogram_family_bucket\xfffoo\xffbaz\xffle\xff+Inf", v: 6, lset: labels.FromStrings( @@ -1999,15 +1987,15 @@ func TestProtobufParse(t 
*testing.T) { "le", "+Inf", ), }, - { // 67 + { m: "test_float_histogram_with_zerothreshold_zero", help: "Test float histogram with a zero threshold of zero.", }, - { // 68 + { m: "test_float_histogram_with_zerothreshold_zero", typ: model.MetricTypeHistogram, }, - { // 69 + { m: "test_float_histogram_with_zerothreshold_zero", fhs: &histogram.FloatHistogram{ Count: 5.0, @@ -2023,15 +2011,15 @@ func TestProtobufParse(t *testing.T) { "__name__", "test_float_histogram_with_zerothreshold_zero", ), }, - { // 70 + { m: "rpc_durations_seconds", help: "RPC latency distributions.", }, - { // 71 + { m: "rpc_durations_seconds", typ: model.MetricTypeSummary, }, - { // 72 + { m: "rpc_durations_seconds_count\xffservice\xffexponential", v: 262, lset: labels.FromStrings( @@ -2039,7 +2027,7 @@ func TestProtobufParse(t *testing.T) { "service", "exponential", ), }, - { // 73 + { m: "rpc_durations_seconds_sum\xffservice\xffexponential", v: 0.00025551262820703587, lset: labels.FromStrings( @@ -2047,7 +2035,7 @@ func TestProtobufParse(t *testing.T) { "service", "exponential", ), }, - { // 74 + { m: "rpc_durations_seconds\xffservice\xffexponential\xffquantile\xff0.5", v: 6.442786329648548e-07, lset: labels.FromStrings( @@ -2056,7 +2044,7 @@ func TestProtobufParse(t *testing.T) { "service", "exponential", ), }, - { // 75 + { m: "rpc_durations_seconds\xffservice\xffexponential\xffquantile\xff0.9", v: 1.9435742936658396e-06, lset: labels.FromStrings( @@ -2065,7 +2053,7 @@ func TestProtobufParse(t *testing.T) { "service", "exponential", ), }, - { // 76 + { m: "rpc_durations_seconds\xffservice\xffexponential\xffquantile\xff0.99", v: 4.0471608667037015e-06, lset: labels.FromStrings( @@ -2074,37 +2062,37 @@ func TestProtobufParse(t *testing.T) { "service", "exponential", ), }, - { // 77 + { m: "without_quantiles", help: "A summary without quantiles.", }, - { // 78 + { m: "without_quantiles", typ: model.MetricTypeSummary, }, - { // 79 + { m: "without_quantiles_count", v: 42, lset: 
labels.FromStrings( "__name__", "without_quantiles_count", ), }, - { // 80 + { m: "without_quantiles_sum", v: 1.234, lset: labels.FromStrings( "__name__", "without_quantiles_sum", ), }, - { // 81 + { m: "empty_histogram", help: "A histogram without observations and with a zero threshold of zero but with a no-op span to identify it as a native histogram.", }, - { // 82 + { m: "empty_histogram", typ: model.MetricTypeHistogram, }, - { // 83 + { m: "empty_histogram", shs: &histogram.Histogram{ CounterResetHint: histogram.UnknownCounterReset, @@ -2115,57 +2103,57 @@ func TestProtobufParse(t *testing.T) { "__name__", "empty_histogram", ), }, - { // 84 + { m: "test_counter_with_createdtimestamp", help: "A counter with a created timestamp.", }, - { // 85 + { m: "test_counter_with_createdtimestamp", typ: model.MetricTypeCounter, }, - { // 86 + { m: "test_counter_with_createdtimestamp", v: 42, - ct: 1000, + ct: int64p(1000), lset: labels.FromStrings( "__name__", "test_counter_with_createdtimestamp", ), }, - { // 87 + { m: "test_summary_with_createdtimestamp", help: "A summary with a created timestamp.", }, - { // 88 + { m: "test_summary_with_createdtimestamp", typ: model.MetricTypeSummary, }, - { // 89 + { m: "test_summary_with_createdtimestamp_count", v: 42, - ct: 1000, + ct: int64p(1000), lset: labels.FromStrings( "__name__", "test_summary_with_createdtimestamp_count", ), }, - { // 90 + { m: "test_summary_with_createdtimestamp_sum", v: 1.234, - ct: 1000, + ct: int64p(1000), lset: labels.FromStrings( "__name__", "test_summary_with_createdtimestamp_sum", ), }, - { // 91 + { m: "test_histogram_with_createdtimestamp", help: "A histogram with a created timestamp.", }, - { // 92 + { m: "test_histogram_with_createdtimestamp", typ: model.MetricTypeHistogram, }, - { // 93 + { m: "test_histogram_with_createdtimestamp", - ct: 1000, + ct: int64p(1000), shs: &histogram.Histogram{ CounterResetHint: histogram.UnknownCounterReset, PositiveSpans: []histogram.Span{}, @@ -2175,17 +2163,17 @@ 
func TestProtobufParse(t *testing.T) { "__name__", "test_histogram_with_createdtimestamp", ), }, - { // 94 + { m: "test_gaugehistogram_with_createdtimestamp", help: "A gauge histogram with a created timestamp.", }, - { // 95 + { m: "test_gaugehistogram_with_createdtimestamp", typ: model.MetricTypeGaugeHistogram, }, - { // 96 + { m: "test_gaugehistogram_with_createdtimestamp", - ct: 1000, + ct: int64p(1000), shs: &histogram.Histogram{ CounterResetHint: histogram.GaugeType, PositiveSpans: []histogram.Span{}, @@ -2195,17 +2183,17 @@ func TestProtobufParse(t *testing.T) { "__name__", "test_gaugehistogram_with_createdtimestamp", ), }, - { // 97 + { m: "test_histogram_with_native_histogram_exemplars", help: "A histogram with native histogram exemplars.", }, - { // 98 + { m: "test_histogram_with_native_histogram_exemplars", typ: model.MetricTypeHistogram, }, - { // 99 + { m: "test_histogram_with_native_histogram_exemplars", - t: 1234568, + t: int64p(1234568), shs: &histogram.Histogram{ Count: 175, ZeroCount: 2, @@ -2226,80 +2214,80 @@ func TestProtobufParse(t *testing.T) { lset: labels.FromStrings( "__name__", "test_histogram_with_native_histogram_exemplars", ), - e: []exemplar.Exemplar{ + es: []exemplar.Exemplar{ {Labels: labels.FromStrings("dummyID", "59780"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, {Labels: labels.FromStrings("dummyID", "59772"), Value: -0.00052, HasTs: true, Ts: 1625851160156}, }, }, - { // 100 + { m: "test_histogram_with_native_histogram_exemplars_count", - t: 1234568, + t: int64p(1234568), v: 175, lset: labels.FromStrings( "__name__", "test_histogram_with_native_histogram_exemplars_count", ), }, - { // 101 + { m: "test_histogram_with_native_histogram_exemplars_sum", - t: 1234568, + t: int64p(1234568), v: 0.0008280461746287094, lset: labels.FromStrings( "__name__", "test_histogram_with_native_histogram_exemplars_sum", ), }, - { // 102 + { m: "test_histogram_with_native_histogram_exemplars_bucket\xffle\xff-0.0004899999999999998", - t: 
1234568, + t: int64p(1234568), v: 2, lset: labels.FromStrings( "__name__", "test_histogram_with_native_histogram_exemplars_bucket", "le", "-0.0004899999999999998", ), }, - { // 103 + { m: "test_histogram_with_native_histogram_exemplars_bucket\xffle\xff-0.0003899999999999998", - t: 1234568, + t: int64p(1234568), v: 4, lset: labels.FromStrings( "__name__", "test_histogram_with_native_histogram_exemplars_bucket", "le", "-0.0003899999999999998", ), - e: []exemplar.Exemplar{ + es: []exemplar.Exemplar{ {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, }, }, - { // 104 + { m: "test_histogram_with_native_histogram_exemplars_bucket\xffle\xff-0.0002899999999999998", - t: 1234568, + t: int64p(1234568), v: 16, lset: labels.FromStrings( "__name__", "test_histogram_with_native_histogram_exemplars_bucket", "le", "-0.0002899999999999998", ), - e: []exemplar.Exemplar{ + es: []exemplar.Exemplar{ {Labels: labels.FromStrings("dummyID", "5617"), Value: -0.00029, HasTs: false}, }, }, - { // 105 + { m: "test_histogram_with_native_histogram_exemplars_bucket\xffle\xff+Inf", - t: 1234568, + t: int64p(1234568), v: 175, lset: labels.FromStrings( "__name__", "test_histogram_with_native_histogram_exemplars_bucket", "le", "+Inf", ), }, - { // 106 + { m: "test_histogram_with_native_histogram_exemplars2", help: "Another histogram with native histogram exemplars.", }, - { // 107 + { m: "test_histogram_with_native_histogram_exemplars2", typ: model.MetricTypeHistogram, }, - { // 108 + { m: "test_histogram_with_native_histogram_exemplars2", - t: 1234568, + t: int64p(1234568), shs: &histogram.Histogram{ Count: 175, ZeroCount: 2, @@ -2320,56 +2308,56 @@ func TestProtobufParse(t *testing.T) { lset: labels.FromStrings( "__name__", "test_histogram_with_native_histogram_exemplars2", ), - e: []exemplar.Exemplar{ + es: []exemplar.Exemplar{ {Labels: labels.FromStrings("dummyID", "59780"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, }, }, - { // 109 + { m: 
"test_histogram_with_native_histogram_exemplars2_count", - t: 1234568, + t: int64p(1234568), v: 175, lset: labels.FromStrings( "__name__", "test_histogram_with_native_histogram_exemplars2_count", ), }, - { // 110 + { m: "test_histogram_with_native_histogram_exemplars2_sum", - t: 1234568, + t: int64p(1234568), v: 0.0008280461746287094, lset: labels.FromStrings( "__name__", "test_histogram_with_native_histogram_exemplars2_sum", ), }, - { // 111 + { m: "test_histogram_with_native_histogram_exemplars2_bucket\xffle\xff-0.0004899999999999998", - t: 1234568, + t: int64p(1234568), v: 2, lset: labels.FromStrings( "__name__", "test_histogram_with_native_histogram_exemplars2_bucket", "le", "-0.0004899999999999998", ), }, - { // 112 + { m: "test_histogram_with_native_histogram_exemplars2_bucket\xffle\xff-0.0003899999999999998", - t: 1234568, + t: int64p(1234568), v: 4, lset: labels.FromStrings( "__name__", "test_histogram_with_native_histogram_exemplars2_bucket", "le", "-0.0003899999999999998", ), }, - { // 113 + { m: "test_histogram_with_native_histogram_exemplars2_bucket\xffle\xff-0.0002899999999999998", - t: 1234568, + t: int64p(1234568), v: 16, lset: labels.FromStrings( "__name__", "test_histogram_with_native_histogram_exemplars2_bucket", "le", "-0.0002899999999999998", ), }, - { // 114 + { m: "test_histogram_with_native_histogram_exemplars2_bucket\xffle\xff+Inf", - t: 1234568, + t: int64p(1234568), v: 175, lset: labels.FromStrings( "__name__", "test_histogram_with_native_histogram_exemplars2_bucket", @@ -2383,94 +2371,11 @@ func TestProtobufParse(t *testing.T) { for _, scenario := range scenarios { t.Run(scenario.name, func(t *testing.T) { var ( - i int - res labels.Labels p = scenario.parser exp = scenario.expected ) - - for { - et, err := p.Next() - if errors.Is(err, io.EOF) { - break - } - require.NoError(t, err) - - switch et { - case EntrySeries: - m, ts, v := p.Series() - - var e exemplar.Exemplar - p.Metric(&res) - eFound := p.Exemplar(&e) - ct := 
p.CreatedTimestamp() - require.Equal(t, exp[i].m, string(m), "i: %d", i) - if ts != nil { - require.Equal(t, exp[i].t, *ts, "i: %d", i) - } else { - require.Equal(t, int64(0), exp[i].t, "i: %d", i) - } - require.Equal(t, exp[i].v, v, "i: %d", i) - testutil.RequireEqual(t, exp[i].lset, res, "i: %d", i) - if len(exp[i].e) == 0 { - require.False(t, eFound, "i: %d", i) - } else { - require.True(t, eFound, "i: %d", i) - testutil.RequireEqual(t, exp[i].e[0], e, "i: %d", i) - require.False(t, p.Exemplar(&e), "too many exemplars returned, i: %d", i) - } - if exp[i].ct != 0 { - require.NotNilf(t, ct, "i: %d", i) - require.Equal(t, exp[i].ct, *ct, "i: %d", i) - } else { - require.Nilf(t, ct, "i: %d", i) - } - - case EntryHistogram: - m, ts, shs, fhs := p.Histogram() - p.Metric(&res) - require.Equal(t, exp[i].m, string(m), "i: %d", i) - if ts != nil { - require.Equal(t, exp[i].t, *ts, "i: %d", i) - } else { - require.Equal(t, int64(0), exp[i].t, "i: %d", i) - } - testutil.RequireEqual(t, exp[i].lset, res, "i: %d", i) - require.Equal(t, exp[i].m, string(m), "i: %d", i) - if shs != nil { - require.Equal(t, exp[i].shs, shs, "i: %d", i) - } else { - require.Equal(t, exp[i].fhs, fhs, "i: %d", i) - } - j := 0 - for e := (exemplar.Exemplar{}); p.Exemplar(&e); j++ { - testutil.RequireEqual(t, exp[i].e[j], e, "i: %d", i) - e = exemplar.Exemplar{} - } - require.Len(t, exp[i].e, j, "not enough exemplars found, i: %d", i) - - case EntryType: - m, typ := p.Type() - require.Equal(t, exp[i].m, string(m), "i: %d", i) - require.Equal(t, exp[i].typ, typ, "i: %d", i) - - case EntryHelp: - m, h := p.Help() - require.Equal(t, exp[i].m, string(m), "i: %d", i) - require.Equal(t, exp[i].help, string(h), "i: %d", i) - - case EntryUnit: - m, u := p.Unit() - require.Equal(t, exp[i].m, string(m), "i: %d", i) - require.Equal(t, exp[i].unit, string(u), "i: %d", i) - - case EntryComment: - require.Equal(t, exp[i].comment, string(p.Comment()), "i: %d", i) - } - - i++ - } - require.Len(t, exp, i) + got := 
testParse(t, p) + requireEntries(t, exp, got) }) } } From db730fcade5d10275b68b21a96e262e297605f1c Mon Sep 17 00:00:00 2001 From: Julien Date: Mon, 7 Oct 2024 12:45:09 +0200 Subject: [PATCH 130/137] Document the notifications API Signed-off-by: Julien --- docs/querying/api.md | 66 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 66 insertions(+) diff --git a/docs/querying/api.md b/docs/querying/api.md index 714438398b..3fcc7322d1 100644 --- a/docs/querying/api.md +++ b/docs/querying/api.md @@ -1393,3 +1393,69 @@ Enable the OTLP receiver by setting endpoint is `/api/v1/otlp/v1/metrics`. *New in v2.47* + +## Notifications + +The following endpoints provide information about active status notifications concerning the Prometheus server itself. +Notifications are used in the web UI. + +These endpoints are **experimental**. They may change in the future. + +### Active Notifications + +The `/api/v1/notifications` endpoint returns a list of all currently active notifications. + +``` +GET /api/v1/notifications +``` + +Example: + +``` +$ curl http://localhost:9090/api/v1/notifications +{ + "status": "success", + "data": [ + { + "text": "Prometheus is shutting down and gracefully stopping all operations.", + "date": "2024-10-07T12:33:08.551376578+02:00", + "active": true + } + ] +} +``` + +*New in v3.0* + +### Live Notifications + +The `/api/v1/notifications/live` endpoint streams live notifications as they occur, using [Server-Sent Events](https://html.spec.whatwg.org/multipage/server-sent-events.html#server-sent-events). Deleted notifications are sent with `active: false`. Active notifications will be sent when connecting to the endpoint. 
+ +``` +GET /api/v1/notifications/live +``` + +Example: + +``` +$ curl http://localhost:9090/api/v1/notifications/live +data: { + "status": "success", + "data": [ + { + "text": "Prometheus is shutting down and gracefully stopping all operations.", + "date": "2024-10-07T12:33:08.551376578+02:00", + "active": true + } + ] +} +``` + +**Note:** The `/notifications/live` endpoint will return a `204 No Content` response if the maximum number of subscribers has been reached. You can set the maximum number of listeners with the flag `--web.max-notifications-subscribers`, which defaults to 16. + +``` +GET /api/v1/notifications/live +204 No Content +``` + +*New in v3.0* From 08d4b034da5bb20eaadb55b26f57e4d3c3fbb266 Mon Sep 17 00:00:00 2001 From: beorn7 Date: Sat, 5 Oct 2024 20:31:14 +0200 Subject: [PATCH 131/137] docs: Declare "float literals are time durations" as stable This unifies the documentation of float literals and time durations and updates all references to the old definitions. Signed-off-by: beorn7 --- docs/querying/api.md | 2 +- docs/querying/basics.md | 111 +++++++++++++++++++++------------------- 2 files changed, 60 insertions(+), 53 deletions(-) diff --git a/docs/querying/api.md b/docs/querying/api.md index 714438398b..2787340945 100644 --- a/docs/querying/api.md +++ b/docs/querying/api.md @@ -59,7 +59,7 @@ timestamps are always represented as Unix timestamps in seconds. * ``: Prometheus [time series selectors](basics.md#time-series-selectors) like `http_requests_total` or `http_requests_total{method=~"(GET|POST)"}` and need to be URL-encoded. -* ``: [Prometheus duration strings](basics.md#time-durations). +* ``: [the subset of Prometheus float literals using time units](basics.md#float-literals-and-time-durations). For example, `5m` refers to a duration of 5 minutes. * ``: boolean values (strings `true` and `false`). 
diff --git a/docs/querying/basics.md b/docs/querying/basics.md index 66d7b8018d..99de293d4f 100644 --- a/docs/querying/basics.md +++ b/docs/querying/basics.md @@ -68,9 +68,10 @@ Example: 'these are unescaped: \n \\ \t' `these are not unescaped: \n ' " \t` -### Float literals +### Float literals and time durations -Scalar float values can be written as literal integer or floating-point numbers in the format (whitespace only included for better readability): +Scalar float values can be written as literal integer or floating-point numbers +in the format (whitespace only included for better readability): [-+]?( [0-9]*\.?[0-9]+([eE][-+]?[0-9]+)? @@ -87,16 +88,53 @@ Examples: 0x8f -Inf NaN - -As of version 2.54, float literals can also be represented using the syntax of time durations, where the time duration is converted into a float value corresponding to the number of seconds the time duration represents. This is an experimental feature and might still change. +Additionally, underscores (`_`) can be used in between decimal or hexadecimal +digits to improve readability. Examples: - 1s # Equivalent to 1.0 - 2m # Equivalent to 120.0 - 1ms # Equivalent to 0.001 - + 1_000_000 + .123_456_789 + 0x_53_AB_F3_82 + +Float literals are also used to specify durations in seconds. For convenience, +decimal integer numbers may be combined with the following +time units: + +* `ms` – milliseconds +* `s` – seconds – 1s equals 1000ms +* `m` – minutes – 1m equals 60s (ignoring leap seconds) +* `h` – hours – 1h equals 60m +* `d` – days – 1d equals 24h (ignoring so-called daylight saving time) +* `w` – weeks – 1w equals 7d +* `y` – years – 1y equals 365d (ignoring leap days) + +Suffixing a decimal integer number with one of the units above is a different +representation of the equivalent number of seconds as a bare float literal. + +Examples: + + 1s # Equivalent to 1. + 2m # Equivalent to 120. + 1ms # Equivalent to 0.001. + -2h # Equivalent to -7200. 
+ +The following examples do _not_ work: + + 0xABm # No suffixing of hexadecimal numbers. + 1.5h # Time units cannot be combined with a floating point. + +Infd # No suffixing of ±Inf or NaN. + +Multiple units can be combined by concatenation of suffixed integers. Units +must be ordered from the longest to the shortest. A given unit must only appear +once per float literal. + +Examples: + + 1h30m # Equivalent to 5400s and thus 5400. + 12h34m56s # Equivalent to 45296s and thus 45296. + 54s321ms # Equivalent to 54.321. ## Time series selectors @@ -208,53 +246,22 @@ syntax](https://github.com/google/re2/wiki/Syntax). ### Range Vector Selectors Range vector literals work like instant vector literals, except that they -select a range of samples back from the current instant. Syntactically, a [time -duration](#time-durations) is appended in square brackets (`[]`) at the end of -a vector selector to specify how far back in time values should be fetched for -each resulting range vector element. The range is a left-open and right-closed interval, -i.e. samples with timestamps coinciding with the left boundary of the range are excluded from the selection, -while samples coinciding with the right boundary of the range are included in the selection. +select a range of samples back from the current instant. Syntactically, a +[float literal](#float-literals-and-time-durations) is appended in square +brackets (`[]`) at the end of a vector selector to specify for how many seconds +back in time values should be fetched for each resulting range vector element. +Commonly, the float literal uses the syntax with one or more time units, e.g. +`[5m]`. The range is a left-open and right-closed interval, i.e. samples with +timestamps coinciding with the left boundary of the range are excluded from the +selection, while samples coinciding with the right boundary of the range are +included in the selection. 
-In this example, we select all the values recorded less than 5m ago for all time series -that have the metric name `http_requests_total` and -a `job` label set to `prometheus`: +In this example, we select all the values recorded less than 5m ago for all +time series that have the metric name `http_requests_total` and a `job` label +set to `prometheus`: http_requests_total{job="prometheus"}[5m] -### Time Durations - -Time durations are specified as a number, followed immediately by one of the -following units: - -* `ms` - milliseconds -* `s` - seconds -* `m` - minutes -* `h` - hours -* `d` - days - assuming a day always has 24h -* `w` - weeks - assuming a week always has 7d -* `y` - years - assuming a year always has 365d1 - -1 For days in a year, the leap day is ignored, and conversely, for a minute, a leap second is ignored. - -Time durations can be combined by concatenation. Units must be ordered from the -longest to the shortest. A given unit must only appear once in a time duration. - -Here are some examples of valid time durations: - - 5h - 1h30m - 5m - 10s - - -As of version 2.54, time durations can also be represented using the syntax of float literals, implying the number of seconds of the time duration. This is an experimental feature and might still change. - -Examples: - - 1.0 # Equivalent to 1s - 0.001 # Equivalent to 1ms - 120 # Equivalent to 2m - ### Offset modifier The `offset` modifier allows changing the time offset for individual @@ -337,7 +344,7 @@ Note that the `@` modifier allows a query to look ahead of its evaluation time. Subquery allows you to run an instant query for a given range and resolution. The result of a subquery is a range vector. -Syntax: ` '[' ':' [] ']' [ @ ] [ offset ]` +Syntax: ` '[' ':' [] ']' [ @ ] [ offset ]` * `` is optional. Default is the global evaluation interval. 
From 6ebfbd2d540af150c3e001d13e804ad199dbc103 Mon Sep 17 00:00:00 2001 From: TJ Hoplock Date: Mon, 9 Sep 2024 21:41:53 -0400 Subject: [PATCH 132/137] chore!: adopt log/slog, remove go-kit/log For: #14355 This commit updates Prometheus to adopt stdlib's log/slog package in favor of go-kit/log. As part of converting to use slog, several other related changes are required to get prometheus working, including: - removed unused logging util func `RateLimit()` - forward ported the util/logging/Deduper logging by implementing a small custom slog.Handler that does the deduping before chaining log calls to the underlying real slog.Logger - move some of the json file logging functionality to use prom/common package functionality - refactored some of the new json file logging for scraping - changes to promql.QueryLogger interface to swap out logging methods for relevant slog sugar wrappers - updated lots of tests that used/replicated custom logging functionality, attempting to keep the logical goal of the tests consistent after the transition - added a healthy amount of `if logger == nil { $makeLogger }` type conditional checks amongst various functions where none were provided -- old code that used the go-kit/log.Logger interface had several places where there were nil references when trying to use functions like `With()` to add keyvals on the new *slog.Logger type Signed-off-by: TJ Hoplock --- .golangci.yml | 3 +- cmd/prometheus/main.go | 255 ++++++++++-------- cmd/prometheus/main_test.go | 4 +- cmd/prometheus/query_log_test.go | 1 + cmd/promtool/backfill.go | 5 +- cmd/promtool/main.go | 6 +- cmd/promtool/rules.go | 18 +- cmd/promtool/rules_test.go | 4 +- cmd/promtool/sd.go | 4 +- cmd/promtool/tsdb.go | 12 +- cmd/promtool/unittest.go | 4 +- config/config.go | 11 +- config/config_test.go | 64 ++--- discovery/README.md | 2 +- discovery/aws/ec2.go | 18 +- discovery/aws/lightsail.go | 7 +- discovery/azure/azure.go | 30 +-- discovery/azure/azure_test.go | 4 +- 
discovery/consul/consul.go | 26 +- discovery/consul/consul_test.go | 4 +- discovery/digitalocean/digitalocean.go | 4 +- discovery/digitalocean/digitalocean_test.go | 4 +- discovery/discovery.go | 4 +- discovery/dns/dns.go | 22 +- discovery/dns/dns_test.go | 18 +- discovery/eureka/eureka.go | 4 +- discovery/file/file.go | 28 +- discovery/gce/gce.go | 4 +- discovery/hetzner/hcloud.go | 4 +- discovery/hetzner/hcloud_test.go | 4 +- discovery/hetzner/hetzner.go | 6 +- discovery/hetzner/robot.go | 4 +- discovery/hetzner/robot_test.go | 6 +- discovery/http/http.go | 7 +- discovery/http/http_test.go | 10 +- discovery/ionos/ionos.go | 4 +- discovery/ionos/server.go | 4 +- discovery/kubernetes/endpoints.go | 46 ++-- discovery/kubernetes/endpointslice.go | 34 +-- discovery/kubernetes/ingress.go | 13 +- discovery/kubernetes/kubernetes.go | 26 +- discovery/kubernetes/kubernetes_test.go | 4 +- discovery/kubernetes/node.go | 18 +- discovery/kubernetes/pod.go | 22 +- discovery/kubernetes/service.go | 16 +- discovery/linode/linode.go | 4 +- discovery/linode/linode_test.go | 4 +- discovery/manager.go | 22 +- discovery/manager_test.go | 26 +- discovery/marathon/marathon.go | 4 +- discovery/moby/docker.go | 4 +- discovery/moby/docker_test.go | 6 +- discovery/moby/dockerswarm.go | 4 +- discovery/moby/nodes_test.go | 4 +- discovery/moby/services_test.go | 6 +- discovery/moby/tasks_test.go | 4 +- discovery/nomad/nomad.go | 4 +- discovery/nomad/nomad_test.go | 4 +- discovery/openstack/hypervisor.go | 6 +- discovery/openstack/instance.go | 22 +- discovery/openstack/openstack.go | 6 +- discovery/ovhcloud/dedicated_server.go | 12 +- discovery/ovhcloud/dedicated_server_test.go | 4 +- discovery/ovhcloud/ovhcloud.go | 6 +- discovery/ovhcloud/ovhcloud_test.go | 4 +- discovery/ovhcloud/vps.go | 12 +- discovery/ovhcloud/vps_test.go | 4 +- discovery/puppetdb/puppetdb.go | 7 +- discovery/puppetdb/puppetdb_test.go | 12 +- discovery/refresh/refresh.go | 16 +- discovery/scaleway/scaleway.go | 4 +- 
discovery/triton/triton.go | 4 +- discovery/uyuni/uyuni.go | 6 +- discovery/vultr/vultr.go | 4 +- discovery/vultr/vultr_test.go | 4 +- discovery/xds/kuma.go | 10 +- discovery/xds/kuma_mads.pb.go | 5 +- discovery/xds/xds.go | 11 +- discovery/xds/xds_test.go | 4 +- discovery/zookeeper/zookeeper.go | 13 +- .../examples/custom-sd/adapter-usage/main.go | 25 +- .../examples/custom-sd/adapter/adapter.go | 9 +- documentation/examples/remote_storage/go.mod | 2 +- .../remote_storage_adapter/graphite/client.go | 12 +- .../remote_storage_adapter/influxdb/client.go | 14 +- .../remote_storage_adapter/main.go | 47 ++-- .../remote_storage_adapter/opentsdb/client.go | 9 +- go.mod | 14 +- go.sum | 205 +------------- notifier/notifier.go | 42 +-- notifier/notifier_test.go | 4 +- promql/engine.go | 42 +-- promql/engine_internal_test.go | 17 +- promql/engine_test.go | 60 ++++- promql/query_logger.go | 31 ++- rules/alerting.go | 9 +- rules/alerting_test.go | 26 +- rules/group.go | 58 ++-- rules/manager.go | 22 +- rules/manager_test.go | 50 ++-- rules/origin_test.go | 5 +- scrape/manager.go | 42 +-- scrape/manager_test.go | 8 +- scrape/scrape.go | 85 +++--- scrape/scrape_test.go | 11 +- storage/fanout.go | 13 +- storage/remote/codec_test.go | 4 +- storage/remote/metadata_watcher.go | 18 +- .../prometheusremotewrite/histograms_test.go | 3 +- .../number_data_points_test.go | 3 +- storage/remote/queue_manager.go | 64 ++--- storage/remote/queue_manager_test.go | 9 +- storage/remote/read_handler.go | 15 +- storage/remote/storage.go | 18 +- storage/remote/write.go | 9 +- storage/remote/write_handler.go | 65 +++-- storage/remote/write_handler_test.go | 31 +-- tracing/tracing.go | 17 +- tracing/tracing_test.go | 12 +- tsdb/agent/db.go | 35 ++- tsdb/agent/db_test.go | 15 +- tsdb/block.go | 15 +- tsdb/block_test.go | 12 +- tsdb/blockwriter.go | 11 +- tsdb/blockwriter_test.go | 5 +- tsdb/compact.go | 42 +-- tsdb/compact_test.go | 16 +- tsdb/db.go | 78 +++--- tsdb/db_test.go | 37 +-- tsdb/head.go | 
76 +++--- tsdb/head_append.go | 16 +- tsdb/head_dedupelabels.go | 9 +- tsdb/head_other.go | 4 +- tsdb/head_read.go | 6 +- tsdb/head_wal.go | 29 +- tsdb/repair.go | 18 +- tsdb/tombstones/tombstones.go | 10 +- tsdb/tombstones/tombstones_test.go | 5 +- tsdb/tsdbblockutil.go | 7 +- tsdb/tsdbutil/dir_locker.go | 9 +- tsdb/tsdbutil/dir_locker_test.go | 5 +- tsdb/tsdbutil/dir_locker_testutil.go | 4 +- tsdb/wlog/checkpoint.go | 8 +- tsdb/wlog/checkpoint_test.go | 7 +- tsdb/wlog/live_reader.go | 9 +- tsdb/wlog/reader_test.go | 14 +- tsdb/wlog/watcher.go | 44 +-- tsdb/wlog/watcher_test.go | 8 +- tsdb/wlog/wlog.go | 36 +-- tsdb/wlog/wlog_test.go | 7 +- util/logging/dedupe.go | 143 +++++----- util/logging/dedupe_test.go | 41 +-- util/logging/file.go | 52 ++-- util/logging/file_test.go | 9 +- util/logging/ratelimit.go | 39 --- util/testutil/logging.go | 35 --- util/treecache/treecache.go | 31 ++- web/api/v1/api.go | 15 +- web/api/v1/api_test.go | 27 +- web/api/v1/errors_test.go | 6 +- web/federate.go | 9 +- web/web.go | 21 +- 162 files changed, 1534 insertions(+), 1691 deletions(-) delete mode 100644 util/logging/ratelimit.go delete mode 100644 util/testutil/logging.go diff --git a/.golangci.yml b/.golangci.yml index d476be743b..c512101e1b 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -23,6 +23,7 @@ linters: - usestdlibvars - whitespace - loggercheck + - sloglint issues: max-issues-per-linter: 0 @@ -100,8 +101,6 @@ linters-settings: - (net/http.ResponseWriter).Write # No need to check for errors on server's shutdown. - (*net/http.Server).Shutdown - # Never check for logger errors. - - (github.com/go-kit/log.Logger).Log # Never check for rollback errors as Rollback() is called when a previous error was detected. 
- (github.com/prometheus/prometheus/storage.Appender).Rollback goimports: diff --git a/cmd/prometheus/main.go b/cmd/prometheus/main.go index f670bc8b8c..b84b4edf68 100644 --- a/cmd/prometheus/main.go +++ b/cmd/prometheus/main.go @@ -18,6 +18,7 @@ import ( "context" "errors" "fmt" + "log/slog" "math" "math/bits" "net" @@ -37,8 +38,6 @@ import ( "github.com/KimMachineGun/automemlimit/memlimit" "github.com/alecthomas/kingpin/v2" "github.com/alecthomas/units" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/grafana/regexp" "github.com/mwitkow/go-conntrack" "github.com/oklog/run" @@ -46,8 +45,8 @@ import ( "github.com/prometheus/client_golang/prometheus/collectors" versioncollector "github.com/prometheus/client_golang/prometheus/collectors/version" "github.com/prometheus/common/model" - "github.com/prometheus/common/promlog" - promlogflag "github.com/prometheus/common/promlog/flag" + "github.com/prometheus/common/promslog" + promslogflag "github.com/prometheus/common/promslog/flag" "github.com/prometheus/common/version" toolkit_web "github.com/prometheus/exporter-toolkit/web" "go.uber.org/atomic" @@ -81,6 +80,45 @@ import ( "github.com/prometheus/prometheus/web" ) +// klogv1OutputCallDepth is the stack depth where we can find the origin of this call. +const klogv1OutputCallDepth = 6 + +// klogv1DefaultPrefixLength is the length of the log prefix that we have to strip out. +const klogv1DefaultPrefixLength = 53 + +// klogv1Writer is used in SetOutputBySeverity call below to redirect any calls +// to klogv1 to end up in klogv2. +// This is a hack to support klogv1 without use of go-kit/log. It is inspired +// by klog's upstream klogv1/v2 coexistence example: +// https://github.com/kubernetes/klog/blob/main/examples/coexist_klog_v1_and_v2/coexist_klog_v1_and_v2.go +type klogv1Writer struct{} + +// Write redirects klogv1 calls to klogv2. +// This is a hack to support klogv1 without use of go-kit/log. 
It is inspired +// by klog's upstream klogv1/v2 coexistence example: +// https://github.com/kubernetes/klog/blob/main/examples/coexist_klog_v1_and_v2/coexist_klog_v1_and_v2.go +func (kw klogv1Writer) Write(p []byte) (n int, err error) { + if len(p) < klogv1DefaultPrefixLength { + klogv2.InfoDepth(klogv1OutputCallDepth, string(p)) + return len(p), nil + } + + switch p[0] { + case 'I': + klogv2.InfoDepth(klogv1OutputCallDepth, string(p[klogv1DefaultPrefixLength:])) + case 'W': + klogv2.WarningDepth(klogv1OutputCallDepth, string(p[klogv1DefaultPrefixLength:])) + case 'E': + klogv2.ErrorDepth(klogv1OutputCallDepth, string(p[klogv1DefaultPrefixLength:])) + case 'F': + klogv2.FatalDepth(klogv1OutputCallDepth, string(p[klogv1DefaultPrefixLength:])) + default: + klogv2.InfoDepth(klogv1OutputCallDepth, string(p[klogv1DefaultPrefixLength:])) + } + + return len(p), nil +} + var ( appName = "prometheus" @@ -171,82 +209,82 @@ type flagConfig struct { prometheusURL string corsRegexString string - promlogConfig promlog.Config - promqlEnableDelayedNameRemoval bool + + promslogConfig promslog.Config } // setFeatureListOptions sets the corresponding options from the featureList. 
-func (c *flagConfig) setFeatureListOptions(logger log.Logger) error { +func (c *flagConfig) setFeatureListOptions(logger *slog.Logger) error { for _, f := range c.featureList { opts := strings.Split(f, ",") for _, o := range opts { switch o { case "expand-external-labels": c.enableExpandExternalLabels = true - level.Info(logger).Log("msg", "Experimental expand-external-labels enabled") + logger.Info("Experimental expand-external-labels enabled") case "exemplar-storage": c.tsdb.EnableExemplarStorage = true - level.Info(logger).Log("msg", "Experimental in-memory exemplar storage enabled") + logger.Info("Experimental in-memory exemplar storage enabled") case "memory-snapshot-on-shutdown": c.tsdb.EnableMemorySnapshotOnShutdown = true - level.Info(logger).Log("msg", "Experimental memory snapshot on shutdown enabled") + logger.Info("Experimental memory snapshot on shutdown enabled") case "extra-scrape-metrics": c.scrape.ExtraMetrics = true - level.Info(logger).Log("msg", "Experimental additional scrape metrics enabled") + logger.Info("Experimental additional scrape metrics enabled") case "metadata-wal-records": c.scrape.AppendMetadata = true - level.Info(logger).Log("msg", "Experimental metadata records in WAL enabled, required for remote write 2.0") + logger.Info("Experimental metadata records in WAL enabled, required for remote write 2.0") case "promql-per-step-stats": c.enablePerStepStats = true - level.Info(logger).Log("msg", "Experimental per-step statistics reporting") + logger.Info("Experimental per-step statistics reporting") case "auto-gomaxprocs": c.enableAutoGOMAXPROCS = true - level.Info(logger).Log("msg", "Automatically set GOMAXPROCS to match Linux container CPU quota") + logger.Info("Automatically set GOMAXPROCS to match Linux container CPU quota") case "auto-reload-config": c.enableAutoReload = true if s := time.Duration(c.autoReloadInterval).Seconds(); s > 0 && s < 1 { c.autoReloadInterval, _ = model.ParseDuration("1s") } - level.Info(logger).Log("msg", 
fmt.Sprintf("Enabled automatic configuration file reloading. Checking for configuration changes every %s.", c.autoReloadInterval)) + logger.Info("Enabled automatic configuration file reloading. Checking for configuration changes every", "interval", c.autoReloadInterval) case "auto-gomemlimit": c.enableAutoGOMEMLIMIT = true - level.Info(logger).Log("msg", "Automatically set GOMEMLIMIT to match Linux container or system memory limit") + logger.Info("Automatically set GOMEMLIMIT to match Linux container or system memory limit") case "concurrent-rule-eval": c.enableConcurrentRuleEval = true - level.Info(logger).Log("msg", "Experimental concurrent rule evaluation enabled.") + logger.Info("Experimental concurrent rule evaluation enabled.") case "promql-experimental-functions": parser.EnableExperimentalFunctions = true - level.Info(logger).Log("msg", "Experimental PromQL functions enabled.") + logger.Info("Experimental PromQL functions enabled.") case "native-histograms": c.tsdb.EnableNativeHistograms = true c.scrape.EnableNativeHistogramsIngestion = true // Change relevant global variables. Hacky, but it's hard to pass a new option or default to unmarshallers. config.DefaultConfig.GlobalConfig.ScrapeProtocols = config.DefaultProtoFirstScrapeProtocols config.DefaultGlobalConfig.ScrapeProtocols = config.DefaultProtoFirstScrapeProtocols - level.Info(logger).Log("msg", "Experimental native histogram support enabled. Changed default scrape_protocols to prefer PrometheusProto format.", "global.scrape_protocols", fmt.Sprintf("%v", config.DefaultGlobalConfig.ScrapeProtocols)) + logger.Info("Experimental native histogram support enabled. Changed default scrape_protocols to prefer PrometheusProto format.", "global.scrape_protocols", fmt.Sprintf("%v", config.DefaultGlobalConfig.ScrapeProtocols)) case "ooo-native-histograms": c.tsdb.EnableOOONativeHistograms = true - level.Info(logger).Log("msg", "Experimental out-of-order native histogram ingestion enabled. 
This will only take effect if OutOfOrderTimeWindow is > 0 and if EnableNativeHistograms = true") + logger.Info("Experimental out-of-order native histogram ingestion enabled. This will only take effect if OutOfOrderTimeWindow is > 0 and if EnableNativeHistograms = true") case "created-timestamp-zero-ingestion": c.scrape.EnableCreatedTimestampZeroIngestion = true // Change relevant global variables. Hacky, but it's hard to pass a new option or default to unmarshallers. config.DefaultConfig.GlobalConfig.ScrapeProtocols = config.DefaultProtoFirstScrapeProtocols config.DefaultGlobalConfig.ScrapeProtocols = config.DefaultProtoFirstScrapeProtocols - level.Info(logger).Log("msg", "Experimental created timestamp zero ingestion enabled. Changed default scrape_protocols to prefer PrometheusProto format.", "global.scrape_protocols", fmt.Sprintf("%v", config.DefaultGlobalConfig.ScrapeProtocols)) + logger.Info("Experimental created timestamp zero ingestion enabled. Changed default scrape_protocols to prefer PrometheusProto format.", "global.scrape_protocols", fmt.Sprintf("%v", config.DefaultGlobalConfig.ScrapeProtocols)) case "delayed-compaction": c.tsdb.EnableDelayedCompaction = true - level.Info(logger).Log("msg", "Experimental delayed compaction is enabled.") + logger.Info("Experimental delayed compaction is enabled.") case "promql-delayed-name-removal": c.promqlEnableDelayedNameRemoval = true - level.Info(logger).Log("msg", "Experimental PromQL delayed name removal enabled.") + logger.Info("Experimental PromQL delayed name removal enabled.") case "": continue case "old-ui": c.web.UseOldUI = true - level.Info(logger).Log("msg", "Serving previous version of the Prometheus web UI.") + logger.Info("Serving previous version of the Prometheus web UI.") default: - level.Warn(logger).Log("msg", "Unknown option for --enable-feature", "option", o) + logger.Warn("Unknown option for --enable-feature", "option", o) } } } @@ -280,7 +318,7 @@ func main() { Registerer: 
prometheus.DefaultRegisterer, Gatherer: prometheus.DefaultGatherer, }, - promlogConfig: promlog.Config{}, + promslogConfig: promslog.Config{}, } a := kingpin.New(filepath.Base(os.Args[0]), "The Prometheus monitoring server").UsageWriter(os.Stdout) @@ -483,7 +521,7 @@ func main() { a.Flag("agent", "Run Prometheus in 'Agent mode'.").BoolVar(&agentMode) - promlogflag.AddFlags(a, &cfg.promlogConfig) + promslogflag.AddFlags(a, &cfg.promslogConfig) a.Flag("write-documentation", "Generate command line documentation. Internal use.").Hidden().Action(func(ctx *kingpin.ParseContext) error { if err := documentcli.GenerateMarkdown(a.Model(), os.Stdout); err != nil { @@ -501,7 +539,8 @@ func main() { os.Exit(2) } - logger := promlog.New(&cfg.promlogConfig) + logger := promslog.New(&cfg.promslogConfig) + slog.SetDefault(logger) notifs := notifications.NewNotifications(cfg.maxNotificationsSubscribers, prometheus.DefaultRegisterer) cfg.web.NotificationsSub = notifs.Sub @@ -556,12 +595,12 @@ func main() { // Throw error for invalid config before starting other components. 
var cfgFile *config.Config - if cfgFile, err = config.LoadFile(cfg.configFile, agentMode, false, log.NewNopLogger()); err != nil { + if cfgFile, err = config.LoadFile(cfg.configFile, agentMode, false, promslog.NewNopLogger()); err != nil { absPath, pathErr := filepath.Abs(cfg.configFile) if pathErr != nil { absPath = cfg.configFile } - level.Error(logger).Log("msg", fmt.Sprintf("Error loading config (--config.file=%s)", cfg.configFile), "file", absPath, "err", err) + logger.Error(fmt.Sprintf("Error loading config (--config.file=%s)", cfg.configFile), "file", absPath, "err", err) os.Exit(2) } if _, err := cfgFile.GetScrapeConfigs(); err != nil { @@ -569,7 +608,7 @@ func main() { if pathErr != nil { absPath = cfg.configFile } - level.Error(logger).Log("msg", fmt.Sprintf("Error loading scrape config files from config (--config.file=%q)", cfg.configFile), "file", absPath, "err", err) + logger.Error(fmt.Sprintf("Error loading scrape config files from config (--config.file=%q)", cfg.configFile), "file", absPath, "err", err) os.Exit(2) } if cfg.tsdb.EnableExemplarStorage { @@ -602,7 +641,7 @@ func main() { if !agentMode { if cfg.tsdb.RetentionDuration == 0 && cfg.tsdb.MaxBytes == 0 { cfg.tsdb.RetentionDuration = defaultRetentionDuration - level.Info(logger).Log("msg", "No time or size retention was set so using the default time retention", "duration", defaultRetentionDuration) + logger.Info("No time or size retention was set so using the default time retention", "duration", defaultRetentionDuration) } // Check for overflows. This limits our max retention to 100y. @@ -612,7 +651,7 @@ func main() { panic(err) } cfg.tsdb.RetentionDuration = y - level.Warn(logger).Log("msg", "Time retention value is too high. Limiting to: "+y.String()) + logger.Warn("Time retention value is too high. Limiting to: " + y.String()) } // Max block size settings. 
@@ -633,11 +672,8 @@ func main() { noStepSubqueryInterval := &safePromQLNoStepSubqueryInterval{} noStepSubqueryInterval.Set(config.DefaultGlobalConfig.EvaluationInterval) - // Above level 6, the k8s client would log bearer tokens in clear-text. - klog.ClampLevel(6) - klog.SetLogger(log.With(logger, "component", "k8s_client_runtime")) - klogv2.ClampLevel(6) - klogv2.SetLogger(log.With(logger, "component", "k8s_client_runtime")) + klogv2.SetSlogLogger(logger.With("component", "k8s_client_runtime")) + klog.SetOutputBySeverity("INFO", klogv1Writer{}) modeAppName := "Prometheus Server" mode := "server" @@ -646,20 +682,22 @@ func main() { mode = "agent" } - level.Info(logger).Log("msg", "Starting "+modeAppName, "mode", mode, "version", version.Info()) + logger.Info("Starting "+modeAppName, "mode", mode, "version", version.Info()) if bits.UintSize < 64 { - level.Warn(logger).Log("msg", "This Prometheus binary has not been compiled for a 64-bit architecture. Due to virtual memory constraints of 32-bit systems, it is highly recommended to switch to a 64-bit binary of Prometheus.", "GOARCH", runtime.GOARCH) + logger.Warn("This Prometheus binary has not been compiled for a 64-bit architecture. 
Due to virtual memory constraints of 32-bit systems, it is highly recommended to switch to a 64-bit binary of Prometheus.", "GOARCH", runtime.GOARCH) } - level.Info(logger).Log("build_context", version.BuildContext()) - level.Info(logger).Log("host_details", prom_runtime.Uname()) - level.Info(logger).Log("fd_limits", prom_runtime.FdLimits()) - level.Info(logger).Log("vm_limits", prom_runtime.VMLimits()) + logger.Info("operational information", + "build_context", version.BuildContext(), + "host_details", prom_runtime.Uname(), + "fd_limits", prom_runtime.FdLimits(), + "vm_limits", prom_runtime.VMLimits(), + ) var ( localStorage = &readyStorage{stats: tsdb.NewDBStats()} scraper = &readyScrapeManager{} - remoteStorage = remote.NewStorage(log.With(logger, "component", "remote"), prometheus.DefaultRegisterer, localStorage.StartTime, localStoragePath, time.Duration(cfg.RemoteFlushDeadline), scraper, cfg.scrape.AppendMetadata) + remoteStorage = remote.NewStorage(logger.With("component", "remote"), prometheus.DefaultRegisterer, localStorage.StartTime, localStoragePath, time.Duration(cfg.RemoteFlushDeadline), scraper, cfg.scrape.AppendMetadata) fanoutStorage = storage.NewFanout(logger, localStorage, remoteStorage) ) @@ -667,7 +705,7 @@ func main() { ctxWeb, cancelWeb = context.WithCancel(context.Background()) ctxRule = context.Background() - notifierManager = notifier.NewManager(&cfg.notifier, log.With(logger, "component", "notifier")) + notifierManager = notifier.NewManager(&cfg.notifier, logger.With("component", "notifier")) ctxScrape, cancelScrape = context.WithCancel(context.Background()) ctxNotify, cancelNotify = context.WithCancel(context.Background()) @@ -682,37 +720,37 @@ func main() { // they are not specific to an SD instance. 
err = discovery.RegisterK8sClientMetricsWithPrometheus(prometheus.DefaultRegisterer) if err != nil { - level.Error(logger).Log("msg", "failed to register Kubernetes client metrics", "err", err) + logger.Error("failed to register Kubernetes client metrics", "err", err) os.Exit(1) } sdMetrics, err := discovery.CreateAndRegisterSDMetrics(prometheus.DefaultRegisterer) if err != nil { - level.Error(logger).Log("msg", "failed to register service discovery metrics", "err", err) + logger.Error("failed to register service discovery metrics", "err", err) os.Exit(1) } - discoveryManagerScrape = discovery.NewManager(ctxScrape, log.With(logger, "component", "discovery manager scrape"), prometheus.DefaultRegisterer, sdMetrics, discovery.Name("scrape")) + discoveryManagerScrape = discovery.NewManager(ctxScrape, logger.With("component", "discovery manager scrape"), prometheus.DefaultRegisterer, sdMetrics, discovery.Name("scrape")) if discoveryManagerScrape == nil { - level.Error(logger).Log("msg", "failed to create a discovery manager scrape") + logger.Error("failed to create a discovery manager scrape") os.Exit(1) } - discoveryManagerNotify = discovery.NewManager(ctxNotify, log.With(logger, "component", "discovery manager notify"), prometheus.DefaultRegisterer, sdMetrics, discovery.Name("notify")) + discoveryManagerNotify = discovery.NewManager(ctxNotify, logger.With("component", "discovery manager notify"), prometheus.DefaultRegisterer, sdMetrics, discovery.Name("notify")) if discoveryManagerNotify == nil { - level.Error(logger).Log("msg", "failed to create a discovery manager notify") + logger.Error("failed to create a discovery manager notify") os.Exit(1) } scrapeManager, err := scrape.NewManager( &cfg.scrape, - log.With(logger, "component", "scrape manager"), - func(s string) (log.Logger, error) { return logging.NewJSONFileLogger(s) }, + logger.With("component", "scrape manager"), + logging.NewJSONFileLogger, fanoutStorage, prometheus.DefaultRegisterer, ) if err != nil { - 
level.Error(logger).Log("msg", "failed to create a scrape manager", "err", err) + logger.Error("failed to create a scrape manager", "err", err) os.Exit(1) } @@ -725,10 +763,10 @@ func main() { if cfg.enableAutoGOMAXPROCS { l := func(format string, a ...interface{}) { - level.Info(logger).Log("component", "automaxprocs", "msg", fmt.Sprintf(strings.TrimPrefix(format, "maxprocs: "), a...)) + logger.Info(fmt.Sprintf(strings.TrimPrefix(format, "maxprocs: "), a...), "component", "automaxprocs") } if _, err := maxprocs.Set(maxprocs.Logger(l)); err != nil { - level.Warn(logger).Log("component", "automaxprocs", "msg", "Failed to set GOMAXPROCS automatically", "err", err) + logger.Warn("Failed to set GOMAXPROCS automatically", "component", "automaxprocs", "err", err) } } @@ -742,17 +780,17 @@ func main() { ), ), ); err != nil { - level.Warn(logger).Log("component", "automemlimit", "msg", "Failed to set GOMEMLIMIT automatically", "err", err) + logger.Warn("automemlimit", "msg", "Failed to set GOMEMLIMIT automatically", "err", err) } } if !agentMode { opts := promql.EngineOpts{ - Logger: log.With(logger, "component", "query engine"), + Logger: logger.With("component", "query engine"), Reg: prometheus.DefaultRegisterer, MaxSamples: cfg.queryMaxSamples, Timeout: time.Duration(cfg.queryTimeout), - ActiveQueryTracker: promql.NewActiveQueryTracker(localStoragePath, cfg.queryConcurrency, log.With(logger, "component", "activeQueryTracker")), + ActiveQueryTracker: promql.NewActiveQueryTracker(localStoragePath, cfg.queryConcurrency, logger.With("component", "activeQueryTracker")), LookbackDelta: time.Duration(cfg.lookbackDelta), NoStepSubqueryIntervalFn: noStepSubqueryInterval.Get, // EnableAtModifier and EnableNegativeOffset have to be @@ -773,7 +811,7 @@ func main() { Context: ctxRule, ExternalURL: cfg.web.ExternalURL, Registerer: prometheus.DefaultRegisterer, - Logger: log.With(logger, "component", "rule manager"), + Logger: logger.With("component", "rule manager"), OutageTolerance: 
time.Duration(cfg.outageTolerance), ForGracePeriod: time.Duration(cfg.forGracePeriod), ResendDelay: time.Duration(cfg.resendDelay), @@ -824,7 +862,7 @@ func main() { } // Depends on cfg.web.ScrapeManager so needs to be after cfg.web.ScrapeManager = scrapeManager. - webHandler := web.New(log.With(logger, "component", "web"), &cfg.web) + webHandler := web.New(logger.With("component", "web"), &cfg.web) // Monitor outgoing connections on default transport with conntrack. http.DefaultTransport.(*http.Transport).DialContext = conntrack.NewDialContextFunc( @@ -951,18 +989,18 @@ func main() { listeners, err := webHandler.Listeners() if err != nil { - level.Error(logger).Log("msg", "Unable to start web listeners", "err", err) + logger.Error("Unable to start web listener", "err", err) if err := queryEngine.Close(); err != nil { - level.Warn(logger).Log("msg", "Closing query engine failed", "err", err) + logger.Warn("Closing query engine failed", "err", err) } os.Exit(1) } err = toolkit_web.Validate(*webConfig) if err != nil { - level.Error(logger).Log("msg", "Unable to validate web configuration file", "err", err) + logger.Error("Unable to validate web configuration file", "err", err) if err := queryEngine.Close(); err != nil { - level.Warn(logger).Log("msg", "Closing query engine failed", "err", err) + logger.Warn("Closing query engine failed", "err", err) } os.Exit(1) } @@ -978,15 +1016,15 @@ func main() { // Don't forget to release the reloadReady channel so that waiting blocks can exit normally. 
select { case sig := <-term: - level.Warn(logger).Log("msg", "Received an OS signal, exiting gracefully...", "signal", sig.String()) + logger.Warn("Received an OS signal, exiting gracefully...", "signal", sig.String()) reloadReady.Close() case <-webHandler.Quit(): - level.Warn(logger).Log("msg", "Received termination request via web service, exiting gracefully...") + logger.Warn("Received termination request via web service, exiting gracefully...") case <-cancel: reloadReady.Close() } if err := queryEngine.Close(); err != nil { - level.Warn(logger).Log("msg", "Closing query engine failed", "err", err) + logger.Warn("Closing query engine failed", "err", err) } return nil }, @@ -1002,11 +1040,11 @@ func main() { g.Add( func() error { err := discoveryManagerScrape.Run() - level.Info(logger).Log("msg", "Scrape discovery manager stopped") + logger.Info("Scrape discovery manager stopped") return err }, func(err error) { - level.Info(logger).Log("msg", "Stopping scrape discovery manager...") + logger.Info("Stopping scrape discovery manager...") cancelScrape() }, ) @@ -1016,11 +1054,11 @@ func main() { g.Add( func() error { err := discoveryManagerNotify.Run() - level.Info(logger).Log("msg", "Notify discovery manager stopped") + logger.Info("Notify discovery manager stopped") return err }, func(err error) { - level.Info(logger).Log("msg", "Stopping notify discovery manager...") + logger.Info("Stopping notify discovery manager...") cancelNotify() }, ) @@ -1049,7 +1087,7 @@ func main() { <-reloadReady.C err := scrapeManager.Run(discoveryManagerScrape.SyncCh()) - level.Info(logger).Log("msg", "Scrape manager stopped") + logger.Info("Scrape manager stopped") return err }, func(err error) { @@ -1057,7 +1095,7 @@ func main() { // so that it doesn't try to write samples to a closed storage. // We should also wait for rule manager to be fully stopped to ensure // we don't trigger any false positive alerts for rules using absent(). 
- level.Info(logger).Log("msg", "Stopping scrape manager...") + logger.Info("Stopping scrape manager...") scrapeManager.Stop() }, ) @@ -1088,7 +1126,7 @@ func main() { if cfg.enableAutoReload { checksum, err = config.GenerateChecksum(cfg.configFile) if err != nil { - level.Error(logger).Log("msg", "Failed to generate initial checksum for configuration file", "err", err) + logger.Error("Failed to generate initial checksum for configuration file", "err", err) } } @@ -1108,17 +1146,17 @@ func main() { select { case <-hup: if err := reloadConfig(cfg.configFile, cfg.enableExpandExternalLabels, cfg.tsdb.EnableExemplarStorage, logger, noStepSubqueryInterval, callback, reloaders...); err != nil { - level.Error(logger).Log("msg", "Error reloading config", "err", err) + logger.Error("Error reloading config", "err", err) } else if cfg.enableAutoReload { if currentChecksum, err := config.GenerateChecksum(cfg.configFile); err == nil { checksum = currentChecksum } else { - level.Error(logger).Log("msg", "Failed to generate checksum during configuration reload", "err", err) + logger.Error("Failed to generate checksum during configuration reload", "err", err) } } case rc := <-webHandler.Reload(): if err := reloadConfig(cfg.configFile, cfg.enableExpandExternalLabels, cfg.tsdb.EnableExemplarStorage, logger, noStepSubqueryInterval, callback, reloaders...); err != nil { - level.Error(logger).Log("msg", "Error reloading config", "err", err) + logger.Error("Error reloading config", "err", err) rc <- err } else { rc <- nil @@ -1126,7 +1164,7 @@ func main() { if currentChecksum, err := config.GenerateChecksum(cfg.configFile); err == nil { checksum = currentChecksum } else { - level.Error(logger).Log("msg", "Failed to generate checksum during configuration reload", "err", err) + logger.Error("Failed to generate checksum during configuration reload", "err", err) } } } @@ -1136,14 +1174,14 @@ func main() { } currentChecksum, err := config.GenerateChecksum(cfg.configFile) if err != nil { - 
level.Error(logger).Log("msg", "Failed to generate checksum during configuration reload", "err", err) + logger.Error("Failed to generate checksum during configuration reload", "err", err) } else if currentChecksum == checksum { continue } - level.Info(logger).Log("msg", "Configuration file change detected, reloading the configuration.") + logger.Info("Configuration file change detected, reloading the configuration.") if err := reloadConfig(cfg.configFile, cfg.enableExpandExternalLabels, cfg.tsdb.EnableExemplarStorage, logger, noStepSubqueryInterval, callback, reloaders...); err != nil { - level.Error(logger).Log("msg", "Error reloading config", "err", err) + logger.Error("Error reloading config", "err", err) } else { checksum = currentChecksum } @@ -1180,7 +1218,7 @@ func main() { webHandler.SetReady(web.Ready) notifs.DeleteNotification(notifications.StartingUp) - level.Info(logger).Log("msg", "Server is ready to receive web requests.") + logger.Info("Server is ready to receive web requests.") <-cancel return nil }, @@ -1195,7 +1233,7 @@ func main() { cancel := make(chan struct{}) g.Add( func() error { - level.Info(logger).Log("msg", "Starting TSDB ...") + logger.Info("Starting TSDB ...") if cfg.tsdb.WALSegmentSize != 0 { if cfg.tsdb.WALSegmentSize < 10*1024*1024 || cfg.tsdb.WALSegmentSize > 256*1024*1024 { return errors.New("flag 'storage.tsdb.wal-segment-size' must be set between 10MB and 256MB") @@ -1214,13 +1252,13 @@ func main() { switch fsType := prom_runtime.Statfs(localStoragePath); fsType { case "NFS_SUPER_MAGIC": - level.Warn(logger).Log("fs_type", fsType, "msg", "This filesystem is not supported and may lead to data corruption and data loss. Please carefully read https://prometheus.io/docs/prometheus/latest/storage/ to learn more about supported filesystems.") + logger.Warn("This filesystem is not supported and may lead to data corruption and data loss. 
Please carefully read https://prometheus.io/docs/prometheus/latest/storage/ to learn more about supported filesystems.", "fs_type", fsType) default: - level.Info(logger).Log("fs_type", fsType) + logger.Info("filesystem information", "fs_type", fsType) } - level.Info(logger).Log("msg", "TSDB started") - level.Debug(logger).Log("msg", "TSDB options", + logger.Info("TSDB started") + logger.Debug("TSDB options", "MinBlockDuration", cfg.tsdb.MinBlockDuration, "MaxBlockDuration", cfg.tsdb.MaxBlockDuration, "MaxBytes", cfg.tsdb.MaxBytes, @@ -1239,7 +1277,7 @@ func main() { }, func(err error) { if err := fanoutStorage.Close(); err != nil { - level.Error(logger).Log("msg", "Error stopping storage", "err", err) + logger.Error("Error stopping storage", "err", err) } close(cancel) }, @@ -1251,7 +1289,7 @@ func main() { cancel := make(chan struct{}) g.Add( func() error { - level.Info(logger).Log("msg", "Starting WAL storage ...") + logger.Info("Starting WAL storage ...") if cfg.agent.WALSegmentSize != 0 { if cfg.agent.WALSegmentSize < 10*1024*1024 || cfg.agent.WALSegmentSize > 256*1024*1024 { return errors.New("flag 'storage.agent.wal-segment-size' must be set between 10MB and 256MB") @@ -1270,13 +1308,13 @@ func main() { switch fsType := prom_runtime.Statfs(localStoragePath); fsType { case "NFS_SUPER_MAGIC": - level.Warn(logger).Log("fs_type", fsType, "msg", "This filesystem is not supported and may lead to data corruption and data loss. Please carefully read https://prometheus.io/docs/prometheus/latest/storage/ to learn more about supported filesystems.") + logger.Warn(fsType, "msg", "This filesystem is not supported and may lead to data corruption and data loss. 
Please carefully read https://prometheus.io/docs/prometheus/latest/storage/ to learn more about supported filesystems.") default: - level.Info(logger).Log("fs_type", fsType) + logger.Info(fsType) } - level.Info(logger).Log("msg", "Agent WAL storage started") - level.Debug(logger).Log("msg", "Agent WAL storage options", + logger.Info("Agent WAL storage started") + logger.Debug("Agent WAL storage options", "WALSegmentSize", cfg.agent.WALSegmentSize, "WALCompression", cfg.agent.WALCompression, "StripeSize", cfg.agent.StripeSize, @@ -1294,7 +1332,7 @@ func main() { }, func(e error) { if err := fanoutStorage.Close(); err != nil { - level.Error(logger).Log("msg", "Error stopping storage", "err", err) + logger.Error("Error stopping storage", "err", err) } close(cancel) }, @@ -1328,7 +1366,7 @@ func main() { <-reloadReady.C notifierManager.Run(discoveryManagerNotify.SyncCh()) - level.Info(logger).Log("msg", "Notifier manager stopped") + logger.Info("Notifier manager stopped") return nil }, func(err error) { @@ -1337,16 +1375,16 @@ func main() { ) } if err := g.Run(); err != nil { - level.Error(logger).Log("err", err) + logger.Error("Error running goroutines from run.Group", "err", err) os.Exit(1) } - level.Info(logger).Log("msg", "See you next time!") + logger.Info("See you next time!") } -func openDBWithMetrics(dir string, logger log.Logger, reg prometheus.Registerer, opts *tsdb.Options, stats *tsdb.DBStats) (*tsdb.DB, error) { +func openDBWithMetrics(dir string, logger *slog.Logger, reg prometheus.Registerer, opts *tsdb.Options, stats *tsdb.DBStats) (*tsdb.DB, error) { db, err := tsdb.Open( dir, - log.With(logger, "component", "tsdb"), + logger.With("component", "tsdb"), reg, opts, stats, @@ -1399,10 +1437,10 @@ type reloader struct { reloader func(*config.Config) error } -func reloadConfig(filename string, expandExternalLabels, enableExemplarStorage bool, logger log.Logger, noStepSuqueryInterval *safePromQLNoStepSubqueryInterval, callback func(bool), rls ...reloader) 
(err error) { +func reloadConfig(filename string, expandExternalLabels, enableExemplarStorage bool, logger *slog.Logger, noStepSuqueryInterval *safePromQLNoStepSubqueryInterval, callback func(bool), rls ...reloader) (err error) { start := time.Now() - timings := []interface{}{} - level.Info(logger).Log("msg", "Loading configuration file", "filename", filename) + timingsLogger := logger + logger.Info("Loading configuration file", "filename", filename) defer func() { if err == nil { @@ -1430,10 +1468,10 @@ func reloadConfig(filename string, expandExternalLabels, enableExemplarStorage b for _, rl := range rls { rstart := time.Now() if err := rl.reloader(conf); err != nil { - level.Error(logger).Log("msg", "Failed to apply configuration", "err", err) + logger.Error("Failed to apply configuration", "err", err) failed = true } - timings = append(timings, rl.name, time.Since(rstart)) + timingsLogger = timingsLogger.With((rl.name), time.Since(rstart)) } if failed { return fmt.Errorf("one or more errors occurred while applying the new configuration (--config.file=%q)", filename) @@ -1441,7 +1479,7 @@ func reloadConfig(filename string, expandExternalLabels, enableExemplarStorage b oldGoGC := debug.SetGCPercent(conf.Runtime.GoGC) if oldGoGC != conf.Runtime.GoGC { - level.Info(logger).Log("msg", "updated GOGC", "old", oldGoGC, "new", conf.Runtime.GoGC) + logger.Info("updated GOGC", "old", oldGoGC, "new", conf.Runtime.GoGC) } // Write the new setting out to the ENV var for runtime API output. if conf.Runtime.GoGC >= 0 { @@ -1451,8 +1489,7 @@ func reloadConfig(filename string, expandExternalLabels, enableExemplarStorage b } noStepSuqueryInterval.Set(conf.GlobalConfig.EvaluationInterval) - l := []interface{}{"msg", "Completed loading of configuration file", "filename", filename, "totalDuration", time.Since(start)} - level.Info(logger).Log(append(l, timings...)...) 
+ timingsLogger.Info("Completed loading of configuration file", "filename", filename, "totalDuration", time.Since(start)) return nil } diff --git a/cmd/prometheus/main_test.go b/cmd/prometheus/main_test.go index c16864cb8c..d0c2846bec 100644 --- a/cmd/prometheus/main_test.go +++ b/cmd/prometheus/main_test.go @@ -31,9 +31,9 @@ import ( "time" "github.com/alecthomas/kingpin/v2" - "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "github.com/prometheus/prometheus/config" @@ -295,7 +295,7 @@ func TestTimeMetrics(t *testing.T) { tmpDir := t.TempDir() reg := prometheus.NewRegistry() - db, err := openDBWithMetrics(tmpDir, log.NewNopLogger(), reg, nil, nil) + db, err := openDBWithMetrics(tmpDir, promslog.NewNopLogger(), reg, nil, nil) require.NoError(t, err) defer func() { require.NoError(t, db.Close()) diff --git a/cmd/prometheus/query_log_test.go b/cmd/prometheus/query_log_test.go index 3b00230cd9..f05ad9df2a 100644 --- a/cmd/prometheus/query_log_test.go +++ b/cmd/prometheus/query_log_test.go @@ -442,6 +442,7 @@ func readQueryLog(t *testing.T, path string) []queryLogLine { file, err := os.Open(path) require.NoError(t, err) defer file.Close() + scanner := bufio.NewScanner(file) for scanner.Scan() { var q queryLogLine diff --git a/cmd/promtool/backfill.go b/cmd/promtool/backfill.go index 16491f0416..1408975df9 100644 --- a/cmd/promtool/backfill.go +++ b/cmd/promtool/backfill.go @@ -21,9 +21,10 @@ import ( "math" "time" - "github.com/go-kit/log" "github.com/oklog/ulid" + "github.com/prometheus/common/promslog" + "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/textparse" "github.com/prometheus/prometheus/tsdb" @@ -120,7 +121,7 @@ func createBlocks(input []byte, mint, maxt, maxBlockDuration int64, maxSamplesIn // also need to append samples throughout the whole block range. 
To allow that, we // pretend that the block is twice as large here, but only really add sample in the // original interval later. - w, err := tsdb.NewBlockWriter(log.NewNopLogger(), outputDir, 2*blockDuration) + w, err := tsdb.NewBlockWriter(promslog.NewNopLogger(), outputDir, 2*blockDuration) if err != nil { return fmt.Errorf("block writer: %w", err) } diff --git a/cmd/promtool/main.go b/cmd/promtool/main.go index 48f9be9309..159fae764d 100644 --- a/cmd/promtool/main.go +++ b/cmd/promtool/main.go @@ -32,13 +32,13 @@ import ( "time" "github.com/alecthomas/kingpin/v2" - "github.com/go-kit/log" "github.com/google/pprof/profile" "github.com/prometheus/client_golang/api" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/testutil/promlint" config_util "github.com/prometheus/common/config" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/prometheus/common/version" "github.com/prometheus/exporter-toolkit/web" "gopkg.in/yaml.v2" @@ -575,7 +575,7 @@ func checkFileExists(fn string) error { func checkConfig(agentMode bool, filename string, checkSyntaxOnly bool) ([]string, error) { fmt.Println("Checking", filename) - cfg, err := config.LoadFile(filename, agentMode, false, log.NewNopLogger()) + cfg, err := config.LoadFile(filename, agentMode, false, promslog.NewNopLogger()) if err != nil { return nil, err } @@ -1182,7 +1182,7 @@ func importRules(url *url.URL, roundTripper http.RoundTripper, start, end, outpu return fmt.Errorf("new api client error: %w", err) } - ruleImporter := newRuleImporter(log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr)), cfg, api) + ruleImporter := newRuleImporter(promslog.New(&promslog.Config{}), cfg, api) errs := ruleImporter.loadGroups(ctx, files) for _, err := range errs { if err != nil { diff --git a/cmd/promtool/rules.go b/cmd/promtool/rules.go index 5a18644842..adb214b812 100644 --- a/cmd/promtool/rules.go +++ b/cmd/promtool/rules.go @@ -16,12 +16,12 @@ package 
main import ( "context" "fmt" + "log/slog" "time" - "github.com/go-kit/log" - "github.com/go-kit/log/level" v1 "github.com/prometheus/client_golang/api/prometheus/v1" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/timestamp" @@ -38,7 +38,7 @@ type queryRangeAPI interface { } type ruleImporter struct { - logger log.Logger + logger *slog.Logger config ruleImporterConfig apiClient queryRangeAPI @@ -57,8 +57,8 @@ type ruleImporterConfig struct { // newRuleImporter creates a new rule importer that can be used to parse and evaluate recording rule files and create new series // written to disk in blocks. -func newRuleImporter(logger log.Logger, config ruleImporterConfig, apiClient queryRangeAPI) *ruleImporter { - level.Info(logger).Log("backfiller", "new rule importer", "start", config.start.Format(time.RFC822), "end", config.end.Format(time.RFC822)) +func newRuleImporter(logger *slog.Logger, config ruleImporterConfig, apiClient queryRangeAPI) *ruleImporter { + logger.Info("new rule importer", "component", "backfiller", "start", config.start.Format(time.RFC822), "end", config.end.Format(time.RFC822)) return &ruleImporter{ logger: logger, config: config, @@ -80,10 +80,10 @@ func (importer *ruleImporter) loadGroups(_ context.Context, filenames []string) // importAll evaluates all the recording rules and creates new time series and writes them to disk in blocks. 
func (importer *ruleImporter) importAll(ctx context.Context) (errs []error) { for name, group := range importer.groups { - level.Info(importer.logger).Log("backfiller", "processing group", "name", name) + importer.logger.Info("processing group", "component", "backfiller", "name", name) for i, r := range group.Rules() { - level.Info(importer.logger).Log("backfiller", "processing rule", "id", i, "name", r.Name()) + importer.logger.Info("processing rule", "component", "backfiller", "id", i, "name", r.Name()) if err := importer.importRule(ctx, r.Query().String(), r.Name(), r.Labels(), importer.config.start, importer.config.end, int64(importer.config.maxBlockDuration/time.Millisecond), group); err != nil { errs = append(errs, err) } @@ -124,7 +124,7 @@ func (importer *ruleImporter) importRule(ctx context.Context, ruleExpr, ruleName return fmt.Errorf("query range: %w", err) } if warnings != nil { - level.Warn(importer.logger).Log("msg", "Range query returned warnings.", "warnings", warnings) + importer.logger.Warn("Range query returned warnings.", "warnings", warnings) } // To prevent races with compaction, a block writer only allows appending samples @@ -133,7 +133,7 @@ func (importer *ruleImporter) importRule(ctx context.Context, ruleExpr, ruleName // also need to append samples throughout the whole block range. To allow that, we // pretend that the block is twice as large here, but only really add sample in the // original interval later. 
- w, err := tsdb.NewBlockWriter(log.NewNopLogger(), importer.config.outputDir, 2*blockDuration) + w, err := tsdb.NewBlockWriter(promslog.NewNopLogger(), importer.config.outputDir, 2*blockDuration) if err != nil { return fmt.Errorf("new block writer: %w", err) } diff --git a/cmd/promtool/rules_test.go b/cmd/promtool/rules_test.go index d55fb0c896..94e28e570d 100644 --- a/cmd/promtool/rules_test.go +++ b/cmd/promtool/rules_test.go @@ -21,9 +21,9 @@ import ( "testing" "time" - "github.com/go-kit/log" v1 "github.com/prometheus/client_golang/api/prometheus/v1" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "github.com/prometheus/prometheus/model/labels" @@ -161,7 +161,7 @@ func TestBackfillRuleIntegration(t *testing.T) { } func newTestRuleImporter(_ context.Context, start time.Time, tmpDir string, testSamples model.Matrix, maxBlockDuration time.Duration) (*ruleImporter, error) { - logger := log.NewNopLogger() + logger := promslog.NewNopLogger() cfg := ruleImporterConfig{ outputDir: tmpDir, start: start.Add(-10 * time.Hour), diff --git a/cmd/promtool/sd.go b/cmd/promtool/sd.go index 6c0e896ffe..5c00dab03a 100644 --- a/cmd/promtool/sd.go +++ b/cmd/promtool/sd.go @@ -20,9 +20,9 @@ import ( "os" "time" - "github.com/go-kit/log" "github.com/google/go-cmp/cmp" "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/common/promslog" "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/discovery" @@ -39,7 +39,7 @@ type sdCheckResult struct { // CheckSD performs service discovery for the given job name and reports the results. 
func CheckSD(sdConfigFiles, sdJobName string, sdTimeout time.Duration, registerer prometheus.Registerer) int { - logger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr)) + logger := promslog.New(&promslog.Config{}) cfg, err := config.LoadFile(sdConfigFiles, false, false, logger) if err != nil { diff --git a/cmd/promtool/tsdb.go b/cmd/promtool/tsdb.go index e6323d2944..727275aa6b 100644 --- a/cmd/promtool/tsdb.go +++ b/cmd/promtool/tsdb.go @@ -20,6 +20,7 @@ import ( "errors" "fmt" "io" + "log/slog" "os" "path/filepath" "runtime" @@ -32,9 +33,10 @@ import ( "time" "github.com/alecthomas/units" - "github.com/go-kit/log" "go.uber.org/atomic" + "github.com/prometheus/common/promslog" + "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/promql/parser" "github.com/prometheus/prometheus/storage" @@ -60,7 +62,7 @@ type writeBenchmark struct { memprof *os.File blockprof *os.File mtxprof *os.File - logger log.Logger + logger *slog.Logger } func benchmarkWrite(outPath, samplesFile string, numMetrics, numScrapes int) error { @@ -68,7 +70,7 @@ func benchmarkWrite(outPath, samplesFile string, numMetrics, numScrapes int) err outPath: outPath, samplesFile: samplesFile, numMetrics: numMetrics, - logger: log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr)), + logger: promslog.New(&promslog.Config{}), } if b.outPath == "" { dir, err := os.MkdirTemp("", "tsdb_bench") @@ -87,9 +89,7 @@ func benchmarkWrite(outPath, samplesFile string, numMetrics, numScrapes int) err dir := filepath.Join(b.outPath, "storage") - l := log.With(b.logger, "ts", log.DefaultTimestampUTC, "caller", log.DefaultCaller) - - st, err := tsdb.Open(dir, l, nil, &tsdb.Options{ + st, err := tsdb.Open(dir, b.logger, nil, &tsdb.Options{ RetentionDuration: int64(15 * 24 * time.Hour / time.Millisecond), MinBlockDuration: int64(2 * time.Hour / time.Millisecond), }, tsdb.NewDBStats()) diff --git a/cmd/promtool/unittest.go b/cmd/promtool/unittest.go index 7030635d1c..667e748061 100644 --- 
a/cmd/promtool/unittest.go +++ b/cmd/promtool/unittest.go @@ -26,13 +26,13 @@ import ( "strings" "time" - "github.com/go-kit/log" "github.com/google/go-cmp/cmp" "github.com/grafana/regexp" "github.com/nsf/jsondiff" "gopkg.in/yaml.v2" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" @@ -218,7 +218,7 @@ func (tg *testGroup) test(evalInterval time.Duration, groupOrderMap map[string]i Appendable: suite.Storage(), Context: context.Background(), NotifyFunc: func(ctx context.Context, expr string, alerts ...*rules.Alert) {}, - Logger: log.NewNopLogger(), + Logger: promslog.NewNopLogger(), } m := rules.NewManager(opts) groupsMap, ers := m.LoadGroups(time.Duration(tg.Interval), tg.ExternalLabels, tg.ExternalURL, nil, ruleFiles...) diff --git a/config/config.go b/config/config.go index 6dcb461026..3f35a195d0 100644 --- a/config/config.go +++ b/config/config.go @@ -16,6 +16,7 @@ package config import ( "errors" "fmt" + "log/slog" "net/url" "os" "path/filepath" @@ -25,8 +26,6 @@ import ( "time" "github.com/alecthomas/units" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/grafana/regexp" "github.com/prometheus/common/config" "github.com/prometheus/common/model" @@ -73,7 +72,7 @@ const ( ) // Load parses the YAML input s into a Config. -func Load(s string, expandExternalLabels bool, logger log.Logger) (*Config, error) { +func Load(s string, expandExternalLabels bool, logger *slog.Logger) (*Config, error) { cfg := &Config{} // If the entire config body is empty the UnmarshalYAML method is // never called. 
We thus have to set the DefaultConfig at the entry @@ -98,11 +97,11 @@ func Load(s string, expandExternalLabels bool, logger log.Logger) (*Config, erro if v := os.Getenv(s); v != "" { return v } - level.Warn(logger).Log("msg", "Empty environment variable", "name", s) + logger.Warn("Empty environment variable", "name", s) return "" }) if newV != v.Value { - level.Debug(logger).Log("msg", "External label replaced", "label", v.Name, "input", v.Value, "output", newV) + logger.Debug("External label replaced", "label", v.Name, "input", v.Value, "output", newV) } // Note newV can be blank. https://github.com/prometheus/prometheus/issues/11024 b.Add(v.Name, newV) @@ -112,7 +111,7 @@ func Load(s string, expandExternalLabels bool, logger log.Logger) (*Config, erro } // LoadFile parses the given YAML file into a Config. -func LoadFile(filename string, agentMode, expandExternalLabels bool, logger log.Logger) (*Config, error) { +func LoadFile(filename string, agentMode, expandExternalLabels bool, logger *slog.Logger) (*Config, error) { content, err := os.ReadFile(filename) if err != nil { return nil, err diff --git a/config/config_test.go b/config/config_test.go index 47241e6212..07f071ffee 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -24,10 +24,10 @@ import ( "time" "github.com/alecthomas/units" - "github.com/go-kit/log" "github.com/grafana/regexp" "github.com/prometheus/common/config" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "gopkg.in/yaml.v2" @@ -1501,7 +1501,7 @@ var expectedConf = &Config{ } func TestYAMLRoundtrip(t *testing.T) { - want, err := LoadFile("testdata/roundtrip.good.yml", false, false, log.NewNopLogger()) + want, err := LoadFile("testdata/roundtrip.good.yml", false, false, promslog.NewNopLogger()) require.NoError(t, err) out, err := yaml.Marshal(want) @@ -1514,7 +1514,7 @@ func TestYAMLRoundtrip(t *testing.T) { } func TestRemoteWriteRetryOnRateLimit(t *testing.T) { 
- want, err := LoadFile("testdata/remote_write_retry_on_rate_limit.good.yml", false, false, log.NewNopLogger()) + want, err := LoadFile("testdata/remote_write_retry_on_rate_limit.good.yml", false, false, promslog.NewNopLogger()) require.NoError(t, err) out, err := yaml.Marshal(want) @@ -1529,7 +1529,7 @@ func TestRemoteWriteRetryOnRateLimit(t *testing.T) { func TestOTLPSanitizeResourceAttributes(t *testing.T) { t.Run("good config", func(t *testing.T) { - want, err := LoadFile(filepath.Join("testdata", "otlp_sanitize_resource_attributes.good.yml"), false, false, log.NewNopLogger()) + want, err := LoadFile(filepath.Join("testdata", "otlp_sanitize_resource_attributes.good.yml"), false, false, promslog.NewNopLogger()) require.NoError(t, err) out, err := yaml.Marshal(want) @@ -1541,7 +1541,7 @@ func TestOTLPSanitizeResourceAttributes(t *testing.T) { }) t.Run("bad config", func(t *testing.T) { - _, err := LoadFile(filepath.Join("testdata", "otlp_sanitize_resource_attributes.bad.yml"), false, false, log.NewNopLogger()) + _, err := LoadFile(filepath.Join("testdata", "otlp_sanitize_resource_attributes.bad.yml"), false, false, promslog.NewNopLogger()) require.ErrorContains(t, err, `duplicated promoted OTel resource attribute "k8s.job.name"`) require.ErrorContains(t, err, `empty promoted OTel resource attribute`) }) @@ -1550,16 +1550,16 @@ func TestOTLPSanitizeResourceAttributes(t *testing.T) { func TestLoadConfig(t *testing.T) { // Parse a valid file that sets a global scrape timeout. This tests whether parsing // an overwritten default field in the global config permanently changes the default. 
- _, err := LoadFile("testdata/global_timeout.good.yml", false, false, log.NewNopLogger()) + _, err := LoadFile("testdata/global_timeout.good.yml", false, false, promslog.NewNopLogger()) require.NoError(t, err) - c, err := LoadFile("testdata/conf.good.yml", false, false, log.NewNopLogger()) + c, err := LoadFile("testdata/conf.good.yml", false, false, promslog.NewNopLogger()) require.NoError(t, err) require.Equal(t, expectedConf, c) } func TestScrapeIntervalLarger(t *testing.T) { - c, err := LoadFile("testdata/scrape_interval_larger.good.yml", false, false, log.NewNopLogger()) + c, err := LoadFile("testdata/scrape_interval_larger.good.yml", false, false, promslog.NewNopLogger()) require.NoError(t, err) require.Len(t, c.ScrapeConfigs, 1) for _, sc := range c.ScrapeConfigs { @@ -1569,7 +1569,7 @@ func TestScrapeIntervalLarger(t *testing.T) { // YAML marshaling must not reveal authentication credentials. func TestElideSecrets(t *testing.T) { - c, err := LoadFile("testdata/conf.good.yml", false, false, log.NewNopLogger()) + c, err := LoadFile("testdata/conf.good.yml", false, false, promslog.NewNopLogger()) require.NoError(t, err) secretRe := regexp.MustCompile(`\\u003csecret\\u003e|`) @@ -1586,31 +1586,31 @@ func TestElideSecrets(t *testing.T) { func TestLoadConfigRuleFilesAbsolutePath(t *testing.T) { // Parse a valid file that sets a rule files with an absolute path - c, err := LoadFile(ruleFilesConfigFile, false, false, log.NewNopLogger()) + c, err := LoadFile(ruleFilesConfigFile, false, false, promslog.NewNopLogger()) require.NoError(t, err) require.Equal(t, ruleFilesExpectedConf, c) } func TestKubernetesEmptyAPIServer(t *testing.T) { - _, err := LoadFile("testdata/kubernetes_empty_apiserver.good.yml", false, false, log.NewNopLogger()) + _, err := LoadFile("testdata/kubernetes_empty_apiserver.good.yml", false, false, promslog.NewNopLogger()) require.NoError(t, err) } func TestKubernetesWithKubeConfig(t *testing.T) { - _, err := 
LoadFile("testdata/kubernetes_kubeconfig_without_apiserver.good.yml", false, false, log.NewNopLogger()) + _, err := LoadFile("testdata/kubernetes_kubeconfig_without_apiserver.good.yml", false, false, promslog.NewNopLogger()) require.NoError(t, err) } func TestKubernetesSelectors(t *testing.T) { - _, err := LoadFile("testdata/kubernetes_selectors_endpoints.good.yml", false, false, log.NewNopLogger()) + _, err := LoadFile("testdata/kubernetes_selectors_endpoints.good.yml", false, false, promslog.NewNopLogger()) require.NoError(t, err) - _, err = LoadFile("testdata/kubernetes_selectors_node.good.yml", false, false, log.NewNopLogger()) + _, err = LoadFile("testdata/kubernetes_selectors_node.good.yml", false, false, promslog.NewNopLogger()) require.NoError(t, err) - _, err = LoadFile("testdata/kubernetes_selectors_ingress.good.yml", false, false, log.NewNopLogger()) + _, err = LoadFile("testdata/kubernetes_selectors_ingress.good.yml", false, false, promslog.NewNopLogger()) require.NoError(t, err) - _, err = LoadFile("testdata/kubernetes_selectors_pod.good.yml", false, false, log.NewNopLogger()) + _, err = LoadFile("testdata/kubernetes_selectors_pod.good.yml", false, false, promslog.NewNopLogger()) require.NoError(t, err) - _, err = LoadFile("testdata/kubernetes_selectors_service.good.yml", false, false, log.NewNopLogger()) + _, err = LoadFile("testdata/kubernetes_selectors_service.good.yml", false, false, promslog.NewNopLogger()) require.NoError(t, err) } @@ -2094,7 +2094,7 @@ func TestBadConfigs(t *testing.T) { model.NameValidationScheme = model.UTF8Validation }() for _, ee := range expectedErrors { - _, err := LoadFile("testdata/"+ee.filename, false, false, log.NewNopLogger()) + _, err := LoadFile("testdata/"+ee.filename, false, false, promslog.NewNopLogger()) require.ErrorContains(t, err, ee.errMsg, "Expected error for %s to contain %q but got: %s", ee.filename, ee.errMsg, err) } @@ -2125,7 +2125,7 @@ func TestBadStaticConfigsYML(t *testing.T) { } func 
TestEmptyConfig(t *testing.T) { - c, err := Load("", false, log.NewNopLogger()) + c, err := Load("", false, promslog.NewNopLogger()) require.NoError(t, err) exp := DefaultConfig require.Equal(t, exp, *c) @@ -2135,38 +2135,38 @@ func TestExpandExternalLabels(t *testing.T) { // Cleanup ant TEST env variable that could exist on the system. os.Setenv("TEST", "") - c, err := LoadFile("testdata/external_labels.good.yml", false, false, log.NewNopLogger()) + c, err := LoadFile("testdata/external_labels.good.yml", false, false, promslog.NewNopLogger()) require.NoError(t, err) testutil.RequireEqual(t, labels.FromStrings("bar", "foo", "baz", "foo${TEST}bar", "foo", "${TEST}", "qux", "foo$${TEST}", "xyz", "foo$$bar"), c.GlobalConfig.ExternalLabels) - c, err = LoadFile("testdata/external_labels.good.yml", false, true, log.NewNopLogger()) + c, err = LoadFile("testdata/external_labels.good.yml", false, true, promslog.NewNopLogger()) require.NoError(t, err) testutil.RequireEqual(t, labels.FromStrings("bar", "foo", "baz", "foobar", "foo", "", "qux", "foo${TEST}", "xyz", "foo$bar"), c.GlobalConfig.ExternalLabels) os.Setenv("TEST", "TestValue") - c, err = LoadFile("testdata/external_labels.good.yml", false, true, log.NewNopLogger()) + c, err = LoadFile("testdata/external_labels.good.yml", false, true, promslog.NewNopLogger()) require.NoError(t, err) testutil.RequireEqual(t, labels.FromStrings("bar", "foo", "baz", "fooTestValuebar", "foo", "TestValue", "qux", "foo${TEST}", "xyz", "foo$bar"), c.GlobalConfig.ExternalLabels) } func TestAgentMode(t *testing.T) { - _, err := LoadFile("testdata/agent_mode.with_alert_manager.yml", true, false, log.NewNopLogger()) + _, err := LoadFile("testdata/agent_mode.with_alert_manager.yml", true, false, promslog.NewNopLogger()) require.ErrorContains(t, err, "field alerting is not allowed in agent mode") - _, err = LoadFile("testdata/agent_mode.with_alert_relabels.yml", true, false, log.NewNopLogger()) + _, err = 
LoadFile("testdata/agent_mode.with_alert_relabels.yml", true, false, promslog.NewNopLogger()) require.ErrorContains(t, err, "field alerting is not allowed in agent mode") - _, err = LoadFile("testdata/agent_mode.with_rule_files.yml", true, false, log.NewNopLogger()) + _, err = LoadFile("testdata/agent_mode.with_rule_files.yml", true, false, promslog.NewNopLogger()) require.ErrorContains(t, err, "field rule_files is not allowed in agent mode") - _, err = LoadFile("testdata/agent_mode.with_remote_reads.yml", true, false, log.NewNopLogger()) + _, err = LoadFile("testdata/agent_mode.with_remote_reads.yml", true, false, promslog.NewNopLogger()) require.ErrorContains(t, err, "field remote_read is not allowed in agent mode") - c, err := LoadFile("testdata/agent_mode.without_remote_writes.yml", true, false, log.NewNopLogger()) + c, err := LoadFile("testdata/agent_mode.without_remote_writes.yml", true, false, promslog.NewNopLogger()) require.NoError(t, err) require.Empty(t, c.RemoteWriteConfigs) - c, err = LoadFile("testdata/agent_mode.good.yml", true, false, log.NewNopLogger()) + c, err = LoadFile("testdata/agent_mode.good.yml", true, false, promslog.NewNopLogger()) require.NoError(t, err) require.Len(t, c.RemoteWriteConfigs, 1) require.Equal( @@ -2177,7 +2177,7 @@ func TestAgentMode(t *testing.T) { } func TestEmptyGlobalBlock(t *testing.T) { - c, err := Load("global:\n", false, log.NewNopLogger()) + c, err := Load("global:\n", false, promslog.NewNopLogger()) require.NoError(t, err) exp := DefaultConfig exp.Runtime = DefaultRuntimeConfig @@ -2332,7 +2332,7 @@ func TestGetScrapeConfigs(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - c, err := LoadFile(tc.configFile, false, false, log.NewNopLogger()) + c, err := LoadFile(tc.configFile, false, false, promslog.NewNopLogger()) require.NoError(t, err) scfgs, err := c.GetScrapeConfigs() @@ -2350,7 +2350,7 @@ func kubernetesSDHostURL() config.URL { } func TestScrapeConfigDisableCompression(t 
*testing.T) { - want, err := LoadFile("testdata/scrape_config_disable_compression.good.yml", false, false, log.NewNopLogger()) + want, err := LoadFile("testdata/scrape_config_disable_compression.good.yml", false, false, promslog.NewNopLogger()) require.NoError(t, err) out, err := yaml.Marshal(want) @@ -2397,7 +2397,7 @@ func TestScrapeConfigNameValidationSettings(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { - want, err := LoadFile(fmt.Sprintf("testdata/%s.yml", tc.inputFile), false, false, log.NewNopLogger()) + want, err := LoadFile(fmt.Sprintf("testdata/%s.yml", tc.inputFile), false, false, promslog.NewNopLogger()) require.NoError(t, err) out, err := yaml.Marshal(want) diff --git a/discovery/README.md b/discovery/README.md index 4c06608625..d5418e7fb1 100644 --- a/discovery/README.md +++ b/discovery/README.md @@ -233,7 +233,7 @@ type Config interface { } type DiscovererOptions struct { - Logger log.Logger + Logger *slog.Logger // A registerer for the Discoverer's metrics. Registerer prometheus.Registerer diff --git a/discovery/aws/ec2.go b/discovery/aws/ec2.go index a44912481a..51eec8dba4 100644 --- a/discovery/aws/ec2.go +++ b/discovery/aws/ec2.go @@ -17,6 +17,7 @@ import ( "context" "errors" "fmt" + "log/slog" "net" "strconv" "strings" @@ -29,11 +30,10 @@ import ( "github.com/aws/aws-sdk-go/aws/ec2metadata" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/ec2" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/discovery/refresh" @@ -146,7 +146,7 @@ func (c *EC2SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { // the Discoverer interface. 
type EC2Discovery struct { *refresh.Discovery - logger log.Logger + logger *slog.Logger cfg *EC2SDConfig ec2 *ec2.EC2 @@ -157,14 +157,14 @@ type EC2Discovery struct { } // NewEC2Discovery returns a new EC2Discovery which periodically refreshes its targets. -func NewEC2Discovery(conf *EC2SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*EC2Discovery, error) { +func NewEC2Discovery(conf *EC2SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*EC2Discovery, error) { m, ok := metrics.(*ec2Metrics) if !ok { return nil, fmt.Errorf("invalid discovery metrics type") } if logger == nil { - logger = log.NewNopLogger() + logger = promslog.NewNopLogger() } d := &EC2Discovery{ logger: logger, @@ -254,8 +254,8 @@ func (d *EC2Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error // Prometheus requires a reload if AWS adds a new AZ to the region. if d.azToAZID == nil { if err := d.refreshAZIDs(ctx); err != nil { - level.Debug(d.logger).Log( - "msg", "Unable to describe availability zones", + d.logger.Debug( + "Unable to describe availability zones", "err", err) } } @@ -296,8 +296,8 @@ func (d *EC2Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error labels[ec2LabelAZ] = model.LabelValue(*inst.Placement.AvailabilityZone) azID, ok := d.azToAZID[*inst.Placement.AvailabilityZone] if !ok && d.azToAZID != nil { - level.Debug(d.logger).Log( - "msg", "Availability zone ID not found", + d.logger.Debug( + "Availability zone ID not found", "az", *inst.Placement.AvailabilityZone) } labels[ec2LabelAZID] = model.LabelValue(azID) diff --git a/discovery/aws/lightsail.go b/discovery/aws/lightsail.go index 0ad7f2d541..0b046be6d9 100644 --- a/discovery/aws/lightsail.go +++ b/discovery/aws/lightsail.go @@ -17,6 +17,7 @@ import ( "context" "errors" "fmt" + "log/slog" "net" "strconv" "strings" @@ -29,10 +30,10 @@ import ( "github.com/aws/aws-sdk-go/aws/ec2metadata" "github.com/aws/aws-sdk-go/aws/session" 
"github.com/aws/aws-sdk-go/service/lightsail" - "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/discovery/refresh" @@ -130,14 +131,14 @@ type LightsailDiscovery struct { } // NewLightsailDiscovery returns a new LightsailDiscovery which periodically refreshes its targets. -func NewLightsailDiscovery(conf *LightsailSDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*LightsailDiscovery, error) { +func NewLightsailDiscovery(conf *LightsailSDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*LightsailDiscovery, error) { m, ok := metrics.(*lightsailMetrics) if !ok { return nil, fmt.Errorf("invalid discovery metrics type") } if logger == nil { - logger = log.NewNopLogger() + logger = promslog.NewNopLogger() } d := &LightsailDiscovery{ diff --git a/discovery/azure/azure.go b/discovery/azure/azure.go index 70d95b9f3a..cbf70048de 100644 --- a/discovery/azure/azure.go +++ b/discovery/azure/azure.go @@ -17,6 +17,7 @@ import ( "context" "errors" "fmt" + "log/slog" "math/rand" "net" "net/http" @@ -35,10 +36,9 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" cache "github.com/Code-Hex/go-generics-cache" "github.com/Code-Hex/go-generics-cache/policy/lru" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" config_util "github.com/prometheus/common/config" + "github.com/prometheus/common/promslog" "github.com/prometheus/common/model" "github.com/prometheus/common/version" @@ -175,7 +175,7 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { type Discovery struct { *refresh.Discovery - logger log.Logger + logger *slog.Logger cfg *SDConfig port int cache *cache.Cache[string, *armnetwork.Interface] @@ -183,14 +183,14 
@@ type Discovery struct { } // NewDiscovery returns a new AzureDiscovery which periodically refreshes its targets. -func NewDiscovery(cfg *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { +func NewDiscovery(cfg *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { m, ok := metrics.(*azureMetrics) if !ok { return nil, fmt.Errorf("invalid discovery metrics type") } if logger == nil { - logger = log.NewNopLogger() + logger = promslog.NewNopLogger() } l := cache.New(cache.AsLRU[string, *armnetwork.Interface](lru.WithCapacity(5000))) d := &Discovery{ @@ -228,13 +228,13 @@ type azureClient struct { vm *armcompute.VirtualMachinesClient vmss *armcompute.VirtualMachineScaleSetsClient vmssvm *armcompute.VirtualMachineScaleSetVMsClient - logger log.Logger + logger *slog.Logger } var _ client = &azureClient{} // createAzureClient is a helper function for creating an Azure compute client to ARM. -func createAzureClient(cfg SDConfig, logger log.Logger) (client, error) { +func createAzureClient(cfg SDConfig, logger *slog.Logger) (client, error) { cloudConfiguration, err := CloudConfigurationFromName(cfg.Environment) if err != nil { return &azureClient{}, err @@ -337,21 +337,21 @@ type virtualMachine struct { } // Create a new azureResource object from an ID string. 
-func newAzureResourceFromID(id string, logger log.Logger) (*arm.ResourceID, error) { +func newAzureResourceFromID(id string, logger *slog.Logger) (*arm.ResourceID, error) { if logger == nil { - logger = log.NewNopLogger() + logger = promslog.NewNopLogger() } resourceID, err := arm.ParseResourceID(id) if err != nil { err := fmt.Errorf("invalid ID '%s': %w", id, err) - level.Error(logger).Log("err", err) + logger.Error("Failed to parse resource ID", "err", err) return &arm.ResourceID{}, err } return resourceID, nil } func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) { - defer level.Debug(d.logger).Log("msg", "Azure discovery completed") + defer d.logger.Debug("Azure discovery completed") client, err := createAzureClient(*d.cfg, d.logger) if err != nil { @@ -365,7 +365,7 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) { return nil, fmt.Errorf("could not get virtual machines: %w", err) } - level.Debug(d.logger).Log("msg", "Found virtual machines during Azure discovery.", "count", len(machines)) + d.logger.Debug("Found virtual machines during Azure discovery.", "count", len(machines)) // Load the vms managed by scale sets. scaleSets, err := client.getScaleSets(ctx, d.cfg.ResourceGroup) @@ -459,7 +459,7 @@ func (d *Discovery) vmToLabelSet(ctx context.Context, client client, vm virtualM } if err != nil { if errors.Is(err, errorNotFound) { - level.Warn(d.logger).Log("msg", "Network interface does not exist", "name", nicID, "err", err) + d.logger.Warn("Network interface does not exist", "name", nicID, "err", err) } else { return nil, err } @@ -480,7 +480,7 @@ func (d *Discovery) vmToLabelSet(ctx context.Context, client client, vm virtualM // yet support this. On deallocated machines, this value happens to be nil so it // is a cheap and easy way to determine if a machine is allocated or not. 
if networkInterface.Properties.Primary == nil { - level.Debug(d.logger).Log("msg", "Skipping deallocated virtual machine", "machine", vm.Name) + d.logger.Debug("Skipping deallocated virtual machine", "machine", vm.Name) return nil, nil } @@ -724,7 +724,7 @@ func (d *Discovery) addToCache(nicID string, netInt *armnetwork.Interface) { rs := time.Duration(random) * time.Second exptime := time.Duration(d.cfg.RefreshInterval*10) + rs d.cache.Set(nicID, netInt, cache.WithExpiration(exptime)) - level.Debug(d.logger).Log("msg", "Adding nic", "nic", nicID, "time", exptime.Seconds()) + d.logger.Debug("Adding nic", "nic", nicID, "time", exptime.Seconds()) } // getFromCache will get the network Interface for the specified nicID diff --git a/discovery/azure/azure_test.go b/discovery/azure/azure_test.go index 32dab66c8c..e54f31fb03 100644 --- a/discovery/azure/azure_test.go +++ b/discovery/azure/azure_test.go @@ -23,7 +23,7 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" cache "github.com/Code-Hex/go-generics-cache" "github.com/Code-Hex/go-generics-cache/policy/lru" - "github.com/go-kit/log" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "go.uber.org/goleak" ) @@ -150,7 +150,7 @@ func TestVMToLabelSet(t *testing.T) { cfg := DefaultSDConfig d := &Discovery{ cfg: &cfg, - logger: log.NewNopLogger(), + logger: promslog.NewNopLogger(), cache: cache.New(cache.AsLRU[string, *armnetwork.Interface](lru.WithCapacity(5))), } network := armnetwork.Interface{ diff --git a/discovery/consul/consul.go b/discovery/consul/consul.go index bdc1fc8dce..e156bae6a1 100644 --- a/discovery/consul/consul.go +++ b/discovery/consul/consul.go @@ -17,17 +17,17 @@ import ( "context" "errors" "fmt" + "log/slog" "net" "strconv" "strings" "time" - "github.com/go-kit/log" - "github.com/go-kit/log/level" consul "github.com/hashicorp/consul/api" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" 
"github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/discovery/targetgroup" @@ -177,19 +177,19 @@ type Discovery struct { allowStale bool refreshInterval time.Duration finalizer func() - logger log.Logger + logger *slog.Logger metrics *consulMetrics } // NewDiscovery returns a new Discovery for the given config. -func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { +func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { m, ok := metrics.(*consulMetrics) if !ok { return nil, fmt.Errorf("invalid discovery metrics type") } if logger == nil { - logger = log.NewNopLogger() + logger = promslog.NewNopLogger() } wrapper, err := config.NewClientFromConfig(conf.HTTPClientConfig, "consul_sd", config.WithIdleConnTimeout(2*watchTimeout)) @@ -282,7 +282,7 @@ func (d *Discovery) getDatacenter() error { info, err := d.client.Agent().Self() if err != nil { - level.Error(d.logger).Log("msg", "Error retrieving datacenter name", "err", err) + d.logger.Error("Error retrieving datacenter name", "err", err) d.metrics.rpcFailuresCount.Inc() return err } @@ -290,12 +290,12 @@ func (d *Discovery) getDatacenter() error { dc, ok := info["Config"]["Datacenter"].(string) if !ok { err := fmt.Errorf("invalid value '%v' for Config.Datacenter", info["Config"]["Datacenter"]) - level.Error(d.logger).Log("msg", "Error retrieving datacenter name", "err", err) + d.logger.Error("Error retrieving datacenter name", "err", err) return err } d.clientDatacenter = dc - d.logger = log.With(d.logger, "datacenter", dc) + d.logger = d.logger.With("datacenter", dc) return nil } @@ -361,7 +361,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { // entire list of services. 
func (d *Discovery) watchServices(ctx context.Context, ch chan<- []*targetgroup.Group, lastIndex *uint64, services map[string]func()) { catalog := d.client.Catalog() - level.Debug(d.logger).Log("msg", "Watching services", "tags", strings.Join(d.watchedTags, ",")) + d.logger.Debug("Watching services", "tags", strings.Join(d.watchedTags, ",")) opts := &consul.QueryOptions{ WaitIndex: *lastIndex, @@ -382,7 +382,7 @@ func (d *Discovery) watchServices(ctx context.Context, ch chan<- []*targetgroup. } if err != nil { - level.Error(d.logger).Log("msg", "Error refreshing service list", "err", err) + d.logger.Error("Error refreshing service list", "err", err) d.metrics.rpcFailuresCount.Inc() time.Sleep(retryInterval) return @@ -445,7 +445,7 @@ type consulService struct { discovery *Discovery client *consul.Client tagSeparator string - logger log.Logger + logger *slog.Logger rpcFailuresCount prometheus.Counter serviceRPCDuration prometheus.Observer } @@ -490,7 +490,7 @@ func (d *Discovery) watchService(ctx context.Context, ch chan<- []*targetgroup.G // Get updates for a service. 
func (srv *consulService) watch(ctx context.Context, ch chan<- []*targetgroup.Group, health *consul.Health, lastIndex *uint64) { - level.Debug(srv.logger).Log("msg", "Watching service", "service", srv.name, "tags", strings.Join(srv.tags, ",")) + srv.logger.Debug("Watching service", "service", srv.name, "tags", strings.Join(srv.tags, ",")) opts := &consul.QueryOptions{ WaitIndex: *lastIndex, @@ -513,7 +513,7 @@ func (srv *consulService) watch(ctx context.Context, ch chan<- []*targetgroup.Gr } if err != nil { - level.Error(srv.logger).Log("msg", "Error refreshing service", "service", srv.name, "tags", strings.Join(srv.tags, ","), "err", err) + srv.logger.Error("Error refreshing service", "service", srv.name, "tags", strings.Join(srv.tags, ","), "err", err) srv.rpcFailuresCount.Inc() time.Sleep(retryInterval) return diff --git a/discovery/consul/consul_test.go b/discovery/consul/consul_test.go index e288a5b2ae..87b30fbae2 100644 --- a/discovery/consul/consul_test.go +++ b/discovery/consul/consul_test.go @@ -21,10 +21,10 @@ import ( "testing" "time" - "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "go.uber.org/goleak" "gopkg.in/yaml.v2" @@ -270,7 +270,7 @@ func newServer(t *testing.T) (*httptest.Server, *SDConfig) { } func newDiscovery(t *testing.T, config *SDConfig) *Discovery { - logger := log.NewNopLogger() + logger := promslog.NewNopLogger() metrics := NewTestMetrics(t, config, prometheus.NewRegistry()) diff --git a/discovery/digitalocean/digitalocean.go b/discovery/digitalocean/digitalocean.go index ecee60cb1f..52f3a9c57a 100644 --- a/discovery/digitalocean/digitalocean.go +++ b/discovery/digitalocean/digitalocean.go @@ -16,6 +16,7 @@ package digitalocean import ( "context" "fmt" + "log/slog" "net" "net/http" "strconv" @@ -23,7 +24,6 @@ import ( "time" "github.com/digitalocean/godo" - 
"github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/prometheus/common/model" @@ -111,7 +111,7 @@ type Discovery struct { } // NewDiscovery returns a new Discovery which periodically refreshes its targets. -func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { +func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { m, ok := metrics.(*digitaloceanMetrics) if !ok { return nil, fmt.Errorf("invalid discovery metrics type") diff --git a/discovery/digitalocean/digitalocean_test.go b/discovery/digitalocean/digitalocean_test.go index 841b5ef977..a282225ac2 100644 --- a/discovery/digitalocean/digitalocean_test.go +++ b/discovery/digitalocean/digitalocean_test.go @@ -19,9 +19,9 @@ import ( "net/url" "testing" - "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "github.com/prometheus/prometheus/discovery" @@ -57,7 +57,7 @@ func TestDigitalOceanSDRefresh(t *testing.T) { defer metrics.Unregister() defer refreshMetrics.Unregister() - d, err := NewDiscovery(&cfg, log.NewNopLogger(), metrics) + d, err := NewDiscovery(&cfg, promslog.NewNopLogger(), metrics) require.NoError(t, err) endpoint, err := url.Parse(sdmock.Mock.Endpoint()) require.NoError(t, err) diff --git a/discovery/discovery.go b/discovery/discovery.go index 9a83df409b..c400de3632 100644 --- a/discovery/discovery.go +++ b/discovery/discovery.go @@ -15,9 +15,9 @@ package discovery import ( "context" + "log/slog" "reflect" - "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" @@ -47,7 +47,7 @@ type DiscovererMetrics interface { // DiscovererOptions provides options for a Discoverer. 
type DiscovererOptions struct { - Logger log.Logger + Logger *slog.Logger Metrics DiscovererMetrics diff --git a/discovery/dns/dns.go b/discovery/dns/dns.go index 314c3d38cd..5de7f64886 100644 --- a/discovery/dns/dns.go +++ b/discovery/dns/dns.go @@ -17,17 +17,17 @@ import ( "context" "errors" "fmt" + "log/slog" "net" "strconv" "strings" "sync" "time" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/miekg/dns" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/discovery/refresh" @@ -111,21 +111,21 @@ type Discovery struct { names []string port int qtype uint16 - logger log.Logger + logger *slog.Logger metrics *dnsMetrics - lookupFn func(name string, qtype uint16, logger log.Logger) (*dns.Msg, error) + lookupFn func(name string, qtype uint16, logger *slog.Logger) (*dns.Msg, error) } // NewDiscovery returns a new Discovery which periodically refreshes its targets. 
-func NewDiscovery(conf SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { +func NewDiscovery(conf SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { m, ok := metrics.(*dnsMetrics) if !ok { return nil, fmt.Errorf("invalid discovery metrics type") } if logger == nil { - logger = log.NewNopLogger() + logger = promslog.NewNopLogger() } qtype := dns.TypeSRV @@ -174,7 +174,7 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) { for _, name := range d.names { go func(n string) { if err := d.refreshOne(ctx, n, ch); err != nil && !errors.Is(err, context.Canceled) { - level.Error(d.logger).Log("msg", "Error refreshing DNS targets", "err", err) + d.logger.Error("Error refreshing DNS targets", "err", err) } wg.Done() }(name) @@ -238,7 +238,7 @@ func (d *Discovery) refreshOne(ctx context.Context, name string, ch chan<- *targ // CNAME responses can occur with "Type: A" dns_sd_config requests. continue default: - level.Warn(d.logger).Log("msg", "Invalid record", "record", record) + d.logger.Warn("Invalid record", "record", record) continue } tg.Targets = append(tg.Targets, model.LabelSet{ @@ -288,7 +288,7 @@ func (d *Discovery) refreshOne(ctx context.Context, name string, ch chan<- *targ // error will be generic-looking, because trying to return all the errors // returned by the combination of all name permutations and servers is a // nightmare. 
-func lookupWithSearchPath(name string, qtype uint16, logger log.Logger) (*dns.Msg, error) { +func lookupWithSearchPath(name string, qtype uint16, logger *slog.Logger) (*dns.Msg, error) { conf, err := dns.ClientConfigFromFile(resolvConf) if err != nil { return nil, fmt.Errorf("could not load resolv.conf: %w", err) @@ -337,14 +337,14 @@ func lookupWithSearchPath(name string, qtype uint16, logger log.Logger) (*dns.Ms // A non-viable answer is "anything else", which encompasses both various // system-level problems (like network timeouts) and also // valid-but-unexpected DNS responses (SERVFAIL, REFUSED, etc). -func lookupFromAnyServer(name string, qtype uint16, conf *dns.ClientConfig, logger log.Logger) (*dns.Msg, error) { +func lookupFromAnyServer(name string, qtype uint16, conf *dns.ClientConfig, logger *slog.Logger) (*dns.Msg, error) { client := &dns.Client{} for _, server := range conf.Servers { servAddr := net.JoinHostPort(server, conf.Port) msg, err := askServerForName(name, qtype, client, servAddr, true) if err != nil { - level.Warn(logger).Log("msg", "DNS resolution failed", "server", server, "name", name, "err", err) + logger.Warn("DNS resolution failed", "server", server, "name", name, "err", err) continue } diff --git a/discovery/dns/dns_test.go b/discovery/dns/dns_test.go index 33a976827d..96bb32491f 100644 --- a/discovery/dns/dns_test.go +++ b/discovery/dns/dns_test.go @@ -16,11 +16,11 @@ package dns import ( "context" "fmt" + "log/slog" "net" "testing" "time" - "github.com/go-kit/log" "github.com/miekg/dns" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" @@ -40,7 +40,7 @@ func TestDNS(t *testing.T) { testCases := []struct { name string config SDConfig - lookup func(name string, qtype uint16, logger log.Logger) (*dns.Msg, error) + lookup func(name string, qtype uint16, logger *slog.Logger) (*dns.Msg, error) expected []*targetgroup.Group }{ @@ -52,7 +52,7 @@ func TestDNS(t *testing.T) { Port: 80, Type: "A", }, - 
lookup: func(name string, qtype uint16, logger log.Logger) (*dns.Msg, error) { + lookup: func(name string, qtype uint16, logger *slog.Logger) (*dns.Msg, error) { return nil, fmt.Errorf("some error") }, expected: []*targetgroup.Group{}, @@ -65,7 +65,7 @@ func TestDNS(t *testing.T) { Port: 80, Type: "A", }, - lookup: func(name string, qtype uint16, logger log.Logger) (*dns.Msg, error) { + lookup: func(name string, qtype uint16, logger *slog.Logger) (*dns.Msg, error) { return &dns.Msg{ Answer: []dns.RR{ &dns.A{A: net.IPv4(192, 0, 2, 2)}, @@ -97,7 +97,7 @@ func TestDNS(t *testing.T) { Port: 80, Type: "AAAA", }, - lookup: func(name string, qtype uint16, logger log.Logger) (*dns.Msg, error) { + lookup: func(name string, qtype uint16, logger *slog.Logger) (*dns.Msg, error) { return &dns.Msg{ Answer: []dns.RR{ &dns.AAAA{AAAA: net.IPv6loopback}, @@ -128,7 +128,7 @@ func TestDNS(t *testing.T) { Type: "SRV", RefreshInterval: model.Duration(time.Minute), }, - lookup: func(name string, qtype uint16, logger log.Logger) (*dns.Msg, error) { + lookup: func(name string, qtype uint16, logger *slog.Logger) (*dns.Msg, error) { return &dns.Msg{ Answer: []dns.RR{ &dns.SRV{Port: 3306, Target: "db1.example.com."}, @@ -167,7 +167,7 @@ func TestDNS(t *testing.T) { Names: []string{"_mysql._tcp.db.example.com."}, RefreshInterval: model.Duration(time.Minute), }, - lookup: func(name string, qtype uint16, logger log.Logger) (*dns.Msg, error) { + lookup: func(name string, qtype uint16, logger *slog.Logger) (*dns.Msg, error) { return &dns.Msg{ Answer: []dns.RR{ &dns.SRV{Port: 3306, Target: "db1.example.com."}, @@ -198,7 +198,7 @@ func TestDNS(t *testing.T) { Names: []string{"_mysql._tcp.db.example.com."}, RefreshInterval: model.Duration(time.Minute), }, - lookup: func(name string, qtype uint16, logger log.Logger) (*dns.Msg, error) { + lookup: func(name string, qtype uint16, logger *slog.Logger) (*dns.Msg, error) { return &dns.Msg{}, nil }, expected: []*targetgroup.Group{ @@ -215,7 +215,7 @@ func 
TestDNS(t *testing.T) { Port: 25, RefreshInterval: model.Duration(time.Minute), }, - lookup: func(name string, qtype uint16, logger log.Logger) (*dns.Msg, error) { + lookup: func(name string, qtype uint16, logger *slog.Logger) (*dns.Msg, error) { return &dns.Msg{ Answer: []dns.RR{ &dns.MX{Preference: 0, Mx: "smtp1.example.com."}, diff --git a/discovery/eureka/eureka.go b/discovery/eureka/eureka.go index 779c081aee..5087346486 100644 --- a/discovery/eureka/eureka.go +++ b/discovery/eureka/eureka.go @@ -17,13 +17,13 @@ import ( "context" "errors" "fmt" + "log/slog" "net" "net/http" "net/url" "strconv" "time" - "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/prometheus/common/model" @@ -126,7 +126,7 @@ type Discovery struct { } // NewDiscovery creates a new Eureka discovery for the given role. -func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { +func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { m, ok := metrics.(*eurekaMetrics) if !ok { return nil, fmt.Errorf("invalid discovery metrics type") diff --git a/discovery/file/file.go b/discovery/file/file.go index e7e9d0870f..1c36b254cc 100644 --- a/discovery/file/file.go +++ b/discovery/file/file.go @@ -19,6 +19,7 @@ import ( "errors" "fmt" "io" + "log/slog" "os" "path/filepath" "strings" @@ -26,12 +27,11 @@ import ( "time" "github.com/fsnotify/fsnotify" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/grafana/regexp" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "gopkg.in/yaml.v2" "github.com/prometheus/prometheus/discovery" @@ -175,20 +175,20 @@ type Discovery struct { // and how many target groups they contained. // This is used to detect deleted target groups. 
lastRefresh map[string]int - logger log.Logger + logger *slog.Logger metrics *fileMetrics } // NewDiscovery returns a new file discovery for the given paths. -func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { +func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { fm, ok := metrics.(*fileMetrics) if !ok { return nil, fmt.Errorf("invalid discovery metrics type") } if logger == nil { - logger = log.NewNopLogger() + logger = promslog.NewNopLogger() } disc := &Discovery{ @@ -210,7 +210,7 @@ func (d *Discovery) listFiles() []string { for _, p := range d.paths { files, err := filepath.Glob(p) if err != nil { - level.Error(d.logger).Log("msg", "Error expanding glob", "glob", p, "err", err) + d.logger.Error("Error expanding glob", "glob", p, "err", err) continue } paths = append(paths, files...) @@ -231,7 +231,7 @@ func (d *Discovery) watchFiles() { p = "./" } if err := d.watcher.Add(p); err != nil { - level.Error(d.logger).Log("msg", "Error adding file watch", "path", p, "err", err) + d.logger.Error("Error adding file watch", "path", p, "err", err) } } } @@ -240,7 +240,7 @@ func (d *Discovery) watchFiles() { func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { watcher, err := fsnotify.NewWatcher() if err != nil { - level.Error(d.logger).Log("msg", "Error adding file watcher", "err", err) + d.logger.Error("Error adding file watcher", "err", err) d.metrics.fileWatcherErrorsCount.Inc() return } @@ -280,7 +280,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { case err := <-d.watcher.Errors: if err != nil { - level.Error(d.logger).Log("msg", "Error watching file", "err", err) + d.logger.Error("Error watching file", "err", err) } } } @@ -300,7 +300,7 @@ func (d *Discovery) deleteTimestamp(filename string) { // stop shuts down the file watcher. 
func (d *Discovery) stop() { - level.Debug(d.logger).Log("msg", "Stopping file discovery...", "paths", fmt.Sprintf("%v", d.paths)) + d.logger.Debug("Stopping file discovery...", "paths", fmt.Sprintf("%v", d.paths)) done := make(chan struct{}) defer close(done) @@ -320,10 +320,10 @@ func (d *Discovery) stop() { } }() if err := d.watcher.Close(); err != nil { - level.Error(d.logger).Log("msg", "Error closing file watcher", "paths", fmt.Sprintf("%v", d.paths), "err", err) + d.logger.Error("Error closing file watcher", "paths", fmt.Sprintf("%v", d.paths), "err", err) } - level.Debug(d.logger).Log("msg", "File discovery stopped") + d.logger.Debug("File discovery stopped") } // refresh reads all files matching the discovery's patterns and sends the respective @@ -339,7 +339,7 @@ func (d *Discovery) refresh(ctx context.Context, ch chan<- []*targetgroup.Group) if err != nil { d.metrics.fileSDReadErrorsCount.Inc() - level.Error(d.logger).Log("msg", "Error reading file", "path", p, "err", err) + d.logger.Error("Error reading file", "path", p, "err", err) // Prevent deletion down below. 
ref[p] = d.lastRefresh[p] continue @@ -356,7 +356,7 @@ func (d *Discovery) refresh(ctx context.Context, ch chan<- []*targetgroup.Group) for f, n := range d.lastRefresh { m, ok := ref[f] if !ok || n > m { - level.Debug(d.logger).Log("msg", "file_sd refresh found file that should be removed", "file", f) + d.logger.Debug("file_sd refresh found file that should be removed", "file", f) d.deleteTimestamp(f) for i := m; i < n; i++ { select { diff --git a/discovery/gce/gce.go b/discovery/gce/gce.go index 15f32dd247..a509a144e1 100644 --- a/discovery/gce/gce.go +++ b/discovery/gce/gce.go @@ -17,12 +17,12 @@ import ( "context" "errors" "fmt" + "log/slog" "net/http" "strconv" "strings" "time" - "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "golang.org/x/oauth2/google" @@ -129,7 +129,7 @@ type Discovery struct { } // NewDiscovery returns a new Discovery which periodically refreshes its targets. -func NewDiscovery(conf SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { +func NewDiscovery(conf SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { m, ok := metrics.(*gceMetrics) if !ok { return nil, fmt.Errorf("invalid discovery metrics type") diff --git a/discovery/hetzner/hcloud.go b/discovery/hetzner/hcloud.go index df56f94c5f..ba64250c0f 100644 --- a/discovery/hetzner/hcloud.go +++ b/discovery/hetzner/hcloud.go @@ -15,12 +15,12 @@ package hetzner import ( "context" + "log/slog" "net" "net/http" "strconv" "time" - "github.com/go-kit/log" "github.com/hetznercloud/hcloud-go/v2/hcloud" "github.com/prometheus/common/config" "github.com/prometheus/common/model" @@ -58,7 +58,7 @@ type hcloudDiscovery struct { } // newHcloudDiscovery returns a new hcloudDiscovery which periodically refreshes its targets. 
-func newHcloudDiscovery(conf *SDConfig, _ log.Logger) (*hcloudDiscovery, error) { +func newHcloudDiscovery(conf *SDConfig, _ *slog.Logger) (*hcloudDiscovery, error) { d := &hcloudDiscovery{ port: conf.Port, } diff --git a/discovery/hetzner/hcloud_test.go b/discovery/hetzner/hcloud_test.go index 10b799037a..fa8291625a 100644 --- a/discovery/hetzner/hcloud_test.go +++ b/discovery/hetzner/hcloud_test.go @@ -18,8 +18,8 @@ import ( "fmt" "testing" - "github.com/go-kit/log" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" ) @@ -43,7 +43,7 @@ func TestHCloudSDRefresh(t *testing.T) { cfg.HTTPClientConfig.BearerToken = hcloudTestToken cfg.hcloudEndpoint = suite.Mock.Endpoint() - d, err := newHcloudDiscovery(&cfg, log.NewNopLogger()) + d, err := newHcloudDiscovery(&cfg, promslog.NewNopLogger()) require.NoError(t, err) targetGroups, err := d.refresh(context.Background()) diff --git a/discovery/hetzner/hetzner.go b/discovery/hetzner/hetzner.go index 69c823d382..980c197d77 100644 --- a/discovery/hetzner/hetzner.go +++ b/discovery/hetzner/hetzner.go @@ -17,9 +17,9 @@ import ( "context" "errors" "fmt" + "log/slog" "time" - "github.com/go-kit/log" "github.com/hetznercloud/hcloud-go/v2/hcloud" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" @@ -135,7 +135,7 @@ type Discovery struct { } // NewDiscovery returns a new Discovery which periodically refreshes its targets. 
-func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) { +func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) { m, ok := metrics.(*hetznerMetrics) if !ok { return nil, fmt.Errorf("invalid discovery metrics type") @@ -157,7 +157,7 @@ func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.Discovere ), nil } -func newRefresher(conf *SDConfig, l log.Logger) (refresher, error) { +func newRefresher(conf *SDConfig, l *slog.Logger) (refresher, error) { switch conf.Role { case HetznerRoleHcloud: if conf.hcloudEndpoint == "" { diff --git a/discovery/hetzner/robot.go b/discovery/hetzner/robot.go index 516470b05a..958f8f710f 100644 --- a/discovery/hetzner/robot.go +++ b/discovery/hetzner/robot.go @@ -18,13 +18,13 @@ import ( "encoding/json" "fmt" "io" + "log/slog" "net" "net/http" "strconv" "strings" "time" - "github.com/go-kit/log" "github.com/prometheus/common/config" "github.com/prometheus/common/model" "github.com/prometheus/common/version" @@ -51,7 +51,7 @@ type robotDiscovery struct { } // newRobotDiscovery returns a new robotDiscovery which periodically refreshes its targets. 
-func newRobotDiscovery(conf *SDConfig, _ log.Logger) (*robotDiscovery, error) { +func newRobotDiscovery(conf *SDConfig, _ *slog.Logger) (*robotDiscovery, error) { d := &robotDiscovery{ port: conf.Port, endpoint: conf.robotEndpoint, diff --git a/discovery/hetzner/robot_test.go b/discovery/hetzner/robot_test.go index 814bccd51f..2618bd097c 100644 --- a/discovery/hetzner/robot_test.go +++ b/discovery/hetzner/robot_test.go @@ -18,9 +18,9 @@ import ( "fmt" "testing" - "github.com/go-kit/log" "github.com/prometheus/common/config" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" ) @@ -42,7 +42,7 @@ func TestRobotSDRefresh(t *testing.T) { cfg.HTTPClientConfig.BasicAuth = &config.BasicAuth{Username: robotTestUsername, Password: robotTestPassword} cfg.robotEndpoint = suite.Mock.Endpoint() - d, err := newRobotDiscovery(&cfg, log.NewNopLogger()) + d, err := newRobotDiscovery(&cfg, promslog.NewNopLogger()) require.NoError(t, err) targetGroups, err := d.refresh(context.Background()) @@ -91,7 +91,7 @@ func TestRobotSDRefreshHandleError(t *testing.T) { cfg := DefaultSDConfig cfg.robotEndpoint = suite.Mock.Endpoint() - d, err := newRobotDiscovery(&cfg, log.NewNopLogger()) + d, err := newRobotDiscovery(&cfg, promslog.NewNopLogger()) require.NoError(t, err) targetGroups, err := d.refresh(context.Background()) diff --git a/discovery/http/http.go b/discovery/http/http.go index ff76fd7627..004a5b4ae6 100644 --- a/discovery/http/http.go +++ b/discovery/http/http.go @@ -19,17 +19,18 @@ import ( "errors" "fmt" "io" + "log/slog" "net/http" "net/url" "strconv" "strings" "time" - "github.com/go-kit/log" "github.com/grafana/regexp" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/prometheus/common/version" "github.com/prometheus/prometheus/discovery" @@ -114,14 +115,14 @@ type Discovery struct { } 
// NewDiscovery returns a new HTTP discovery for the given config. -func NewDiscovery(conf *SDConfig, logger log.Logger, clientOpts []config.HTTPClientOption, metrics discovery.DiscovererMetrics) (*Discovery, error) { +func NewDiscovery(conf *SDConfig, logger *slog.Logger, clientOpts []config.HTTPClientOption, metrics discovery.DiscovererMetrics) (*Discovery, error) { m, ok := metrics.(*httpMetrics) if !ok { return nil, fmt.Errorf("invalid discovery metrics type") } if logger == nil { - logger = log.NewNopLogger() + logger = promslog.NewNopLogger() } client, err := config.NewClientFromConfig(conf.HTTPClientConfig, "http", clientOpts...) diff --git a/discovery/http/http_test.go b/discovery/http/http_test.go index 0cafe035dc..9d3a3fb5e7 100644 --- a/discovery/http/http_test.go +++ b/discovery/http/http_test.go @@ -21,11 +21,11 @@ import ( "testing" "time" - "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" "github.com/prometheus/common/config" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "github.com/prometheus/prometheus/discovery" @@ -49,7 +49,7 @@ func TestHTTPValidRefresh(t *testing.T) { require.NoError(t, metrics.Register()) defer metrics.Unregister() - d, err := NewDiscovery(&cfg, log.NewNopLogger(), nil, metrics) + d, err := NewDiscovery(&cfg, promslog.NewNopLogger(), nil, metrics) require.NoError(t, err) ctx := context.Background() @@ -94,7 +94,7 @@ func TestHTTPInvalidCode(t *testing.T) { require.NoError(t, metrics.Register()) defer metrics.Unregister() - d, err := NewDiscovery(&cfg, log.NewNopLogger(), nil, metrics) + d, err := NewDiscovery(&cfg, promslog.NewNopLogger(), nil, metrics) require.NoError(t, err) ctx := context.Background() @@ -123,7 +123,7 @@ func TestHTTPInvalidFormat(t *testing.T) { require.NoError(t, metrics.Register()) defer metrics.Unregister() - d, err := NewDiscovery(&cfg, log.NewNopLogger(), nil, 
metrics) + d, err := NewDiscovery(&cfg, promslog.NewNopLogger(), nil, metrics) require.NoError(t, err) ctx := context.Background() @@ -442,7 +442,7 @@ func TestSourceDisappeared(t *testing.T) { require.NoError(t, metrics.Register()) defer metrics.Unregister() - d, err := NewDiscovery(&cfg, log.NewNopLogger(), nil, metrics) + d, err := NewDiscovery(&cfg, promslog.NewNopLogger(), nil, metrics) require.NoError(t, err) for _, test := range cases { ctx := context.Background() diff --git a/discovery/ionos/ionos.go b/discovery/ionos/ionos.go index c8b4f7f8e5..1aa21667e3 100644 --- a/discovery/ionos/ionos.go +++ b/discovery/ionos/ionos.go @@ -16,9 +16,9 @@ package ionos import ( "errors" "fmt" + "log/slog" "time" - "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/prometheus/common/model" @@ -43,7 +43,7 @@ func init() { type Discovery struct{} // NewDiscovery returns a new refresh.Discovery for IONOS Cloud. -func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) { +func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) { m, ok := metrics.(*ionosMetrics) if !ok { return nil, fmt.Errorf("invalid discovery metrics type") diff --git a/discovery/ionos/server.go b/discovery/ionos/server.go index a850fbbfb4..18e89b1d43 100644 --- a/discovery/ionos/server.go +++ b/discovery/ionos/server.go @@ -16,13 +16,13 @@ package ionos import ( "context" "fmt" + "log/slog" "net" "net/http" "strconv" "strings" "time" - "github.com/go-kit/log" ionoscloud "github.com/ionos-cloud/sdk-go/v6" "github.com/prometheus/common/config" "github.com/prometheus/common/model" @@ -60,7 +60,7 @@ type serverDiscovery struct { datacenterID string } -func newServerDiscovery(conf *SDConfig, _ log.Logger) (*serverDiscovery, error) { +func newServerDiscovery(conf *SDConfig, _ *slog.Logger) (*serverDiscovery, error) { d 
:= &serverDiscovery{ port: conf.Port, datacenterID: conf.DatacenterID, diff --git a/discovery/kubernetes/endpoints.go b/discovery/kubernetes/endpoints.go index 542bc95edc..75da67f1c6 100644 --- a/discovery/kubernetes/endpoints.go +++ b/discovery/kubernetes/endpoints.go @@ -17,13 +17,13 @@ import ( "context" "errors" "fmt" + "log/slog" "net" "strconv" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" apiv1 "k8s.io/api/core/v1" "k8s.io/client-go/tools/cache" "k8s.io/client-go/util/workqueue" @@ -33,7 +33,7 @@ import ( // Endpoints discovers new endpoint targets. type Endpoints struct { - logger log.Logger + logger *slog.Logger endpointsInf cache.SharedIndexInformer serviceInf cache.SharedInformer @@ -49,9 +49,9 @@ type Endpoints struct { } // NewEndpoints returns a new endpoints discovery. -func NewEndpoints(l log.Logger, eps cache.SharedIndexInformer, svc, pod, node cache.SharedInformer, eventCount *prometheus.CounterVec) *Endpoints { +func NewEndpoints(l *slog.Logger, eps cache.SharedIndexInformer, svc, pod, node cache.SharedInformer, eventCount *prometheus.CounterVec) *Endpoints { if l == nil { - l = log.NewNopLogger() + l = promslog.NewNopLogger() } epAddCount := eventCount.WithLabelValues(RoleEndpoint.String(), MetricLabelRoleAdd) @@ -92,13 +92,13 @@ func NewEndpoints(l log.Logger, eps cache.SharedIndexInformer, svc, pod, node ca }, }) if err != nil { - level.Error(l).Log("msg", "Error adding endpoints event handler.", "err", err) + l.Error("Error adding endpoints event handler.", "err", err) } serviceUpdate := func(o interface{}) { svc, err := convertToService(o) if err != nil { - level.Error(e.logger).Log("msg", "converting to Service object failed", "err", err) + e.logger.Error("converting to Service object failed", "err", err) return } @@ -111,7 +111,7 @@ func NewEndpoints(l log.Logger, eps cache.SharedIndexInformer, svc, 
pod, node ca } if err != nil { - level.Error(e.logger).Log("msg", "retrieving endpoints failed", "err", err) + e.logger.Error("retrieving endpoints failed", "err", err) } } _, err = e.serviceInf.AddEventHandler(cache.ResourceEventHandlerFuncs{ @@ -131,7 +131,7 @@ func NewEndpoints(l log.Logger, eps cache.SharedIndexInformer, svc, pod, node ca }, }) if err != nil { - level.Error(l).Log("msg", "Error adding services event handler.", "err", err) + l.Error("Error adding services event handler.", "err", err) } _, err = e.podInf.AddEventHandler(cache.ResourceEventHandlerFuncs{ UpdateFunc: func(old, cur interface{}) { @@ -154,7 +154,7 @@ func NewEndpoints(l log.Logger, eps cache.SharedIndexInformer, svc, pod, node ca }, }) if err != nil { - level.Error(l).Log("msg", "Error adding pods event handler.", "err", err) + l.Error("Error adding pods event handler.", "err", err) } if e.withNodeMetadata { _, err = e.nodeInf.AddEventHandler(cache.ResourceEventHandlerFuncs{ @@ -172,7 +172,7 @@ func NewEndpoints(l log.Logger, eps cache.SharedIndexInformer, svc, pod, node ca }, }) if err != nil { - level.Error(l).Log("msg", "Error adding nodes event handler.", "err", err) + l.Error("Error adding nodes event handler.", "err", err) } } @@ -182,7 +182,7 @@ func NewEndpoints(l log.Logger, eps cache.SharedIndexInformer, svc, pod, node ca func (e *Endpoints) enqueueNode(nodeName string) { endpoints, err := e.endpointsInf.GetIndexer().ByIndex(nodeIndex, nodeName) if err != nil { - level.Error(e.logger).Log("msg", "Error getting endpoints for node", "node", nodeName, "err", err) + e.logger.Error("Error getting endpoints for node", "node", nodeName, "err", err) return } @@ -194,7 +194,7 @@ func (e *Endpoints) enqueueNode(nodeName string) { func (e *Endpoints) enqueuePod(podNamespacedName string) { endpoints, err := e.endpointsInf.GetIndexer().ByIndex(podIndex, podNamespacedName) if err != nil { - level.Error(e.logger).Log("msg", "Error getting endpoints for pod", "pod", podNamespacedName, 
"err", err) + e.logger.Error("Error getting endpoints for pod", "pod", podNamespacedName, "err", err) return } @@ -223,7 +223,7 @@ func (e *Endpoints) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { if !cache.WaitForCacheSync(ctx.Done(), cacheSyncs...) { if !errors.Is(ctx.Err(), context.Canceled) { - level.Error(e.logger).Log("msg", "endpoints informer unable to sync cache") + e.logger.Error("endpoints informer unable to sync cache") } return } @@ -247,13 +247,13 @@ func (e *Endpoints) process(ctx context.Context, ch chan<- []*targetgroup.Group) namespace, name, err := cache.SplitMetaNamespaceKey(key) if err != nil { - level.Error(e.logger).Log("msg", "splitting key failed", "key", key) + e.logger.Error("splitting key failed", "key", key) return true } o, exists, err := e.endpointsStore.GetByKey(key) if err != nil { - level.Error(e.logger).Log("msg", "getting object from store failed", "key", key) + e.logger.Error("getting object from store failed", "key", key) return true } if !exists { @@ -262,7 +262,7 @@ func (e *Endpoints) process(ctx context.Context, ch chan<- []*targetgroup.Group) } eps, err := convertToEndpoints(o) if err != nil { - level.Error(e.logger).Log("msg", "converting to Endpoints object failed", "err", err) + e.logger.Error("converting to Endpoints object failed", "err", err) return true } send(ctx, ch, e.buildEndpoints(eps)) @@ -400,10 +400,10 @@ func (e *Endpoints) buildEndpoints(eps *apiv1.Endpoints) *targetgroup.Group { v := eps.Labels[apiv1.EndpointsOverCapacity] if v == "truncated" { - level.Warn(e.logger).Log("msg", "Number of endpoints in one Endpoints object exceeds 1000 and has been truncated, please use \"role: endpointslice\" instead", "endpoint", eps.Name) + e.logger.Warn("Number of endpoints in one Endpoints object exceeds 1000 and has been truncated, please use \"role: endpointslice\" instead", "endpoint", eps.Name) } if v == "warning" { - level.Warn(e.logger).Log("msg", "Number of endpoints in one Endpoints object exceeds 
1000, please use \"role: endpointslice\" instead", "endpoint", eps.Name) + e.logger.Warn("Number of endpoints in one Endpoints object exceeds 1000, please use \"role: endpointslice\" instead", "endpoint", eps.Name) } // For all seen pods, check all container ports. If they were not covered @@ -460,7 +460,7 @@ func (e *Endpoints) resolvePodRef(ref *apiv1.ObjectReference) *apiv1.Pod { obj, exists, err := e.podStore.Get(p) if err != nil { - level.Error(e.logger).Log("msg", "resolving pod ref failed", "err", err) + e.logger.Error("resolving pod ref failed", "err", err) return nil } if !exists { @@ -476,7 +476,7 @@ func (e *Endpoints) addServiceLabels(ns, name string, tg *targetgroup.Group) { obj, exists, err := e.serviceStore.Get(svc) if err != nil { - level.Error(e.logger).Log("msg", "retrieving service failed", "err", err) + e.logger.Error("retrieving service failed", "err", err) return } if !exists { @@ -487,14 +487,14 @@ func (e *Endpoints) addServiceLabels(ns, name string, tg *targetgroup.Group) { tg.Labels = tg.Labels.Merge(serviceLabels(svc)) } -func addNodeLabels(tg model.LabelSet, nodeInf cache.SharedInformer, logger log.Logger, nodeName *string) model.LabelSet { +func addNodeLabels(tg model.LabelSet, nodeInf cache.SharedInformer, logger *slog.Logger, nodeName *string) model.LabelSet { if nodeName == nil { return tg } obj, exists, err := nodeInf.GetStore().GetByKey(*nodeName) if err != nil { - level.Error(logger).Log("msg", "Error getting node", "node", *nodeName, "err", err) + logger.Error("Error getting node", "node", *nodeName, "err", err) return tg } diff --git a/discovery/kubernetes/endpointslice.go b/discovery/kubernetes/endpointslice.go index 1368303104..efd1c72167 100644 --- a/discovery/kubernetes/endpointslice.go +++ b/discovery/kubernetes/endpointslice.go @@ -17,13 +17,13 @@ import ( "context" "errors" "fmt" + "log/slog" "net" "strconv" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" 
"github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" apiv1 "k8s.io/api/core/v1" v1 "k8s.io/api/discovery/v1" "k8s.io/client-go/tools/cache" @@ -35,7 +35,7 @@ import ( // EndpointSlice discovers new endpoint targets. type EndpointSlice struct { - logger log.Logger + logger *slog.Logger endpointSliceInf cache.SharedIndexInformer serviceInf cache.SharedInformer @@ -51,9 +51,9 @@ type EndpointSlice struct { } // NewEndpointSlice returns a new endpointslice discovery. -func NewEndpointSlice(l log.Logger, eps cache.SharedIndexInformer, svc, pod, node cache.SharedInformer, eventCount *prometheus.CounterVec) *EndpointSlice { +func NewEndpointSlice(l *slog.Logger, eps cache.SharedIndexInformer, svc, pod, node cache.SharedInformer, eventCount *prometheus.CounterVec) *EndpointSlice { if l == nil { - l = log.NewNopLogger() + l = promslog.NewNopLogger() } epslAddCount := eventCount.WithLabelValues(RoleEndpointSlice.String(), MetricLabelRoleAdd) @@ -92,13 +92,13 @@ func NewEndpointSlice(l log.Logger, eps cache.SharedIndexInformer, svc, pod, nod }, }) if err != nil { - level.Error(l).Log("msg", "Error adding endpoint slices event handler.", "err", err) + l.Error("Error adding endpoint slices event handler.", "err", err) } serviceUpdate := func(o interface{}) { svc, err := convertToService(o) if err != nil { - level.Error(e.logger).Log("msg", "converting to Service object failed", "err", err) + e.logger.Error("converting to Service object failed", "err", err) return } @@ -108,7 +108,7 @@ func NewEndpointSlice(l log.Logger, eps cache.SharedIndexInformer, svc, pod, nod for _, obj := range e.endpointSliceStore.List() { esa, err := e.getEndpointSliceAdaptor(obj) if err != nil { - level.Error(e.logger).Log("msg", "converting to EndpointSlice object failed", "err", err) + e.logger.Error("converting to EndpointSlice object failed", "err", err) continue } if lv, exists := esa.labels()[esa.labelServiceName()]; exists && lv == svc.Name { @@ -131,7 +131,7 @@ func 
NewEndpointSlice(l log.Logger, eps cache.SharedIndexInformer, svc, pod, nod }, }) if err != nil { - level.Error(l).Log("msg", "Error adding services event handler.", "err", err) + l.Error("Error adding services event handler.", "err", err) } if e.withNodeMetadata { @@ -150,7 +150,7 @@ func NewEndpointSlice(l log.Logger, eps cache.SharedIndexInformer, svc, pod, nod }, }) if err != nil { - level.Error(l).Log("msg", "Error adding nodes event handler.", "err", err) + l.Error("Error adding nodes event handler.", "err", err) } } @@ -160,7 +160,7 @@ func NewEndpointSlice(l log.Logger, eps cache.SharedIndexInformer, svc, pod, nod func (e *EndpointSlice) enqueueNode(nodeName string) { endpoints, err := e.endpointSliceInf.GetIndexer().ByIndex(nodeIndex, nodeName) if err != nil { - level.Error(e.logger).Log("msg", "Error getting endpoints for node", "node", nodeName, "err", err) + e.logger.Error("Error getting endpoints for node", "node", nodeName, "err", err) return } @@ -188,7 +188,7 @@ func (e *EndpointSlice) Run(ctx context.Context, ch chan<- []*targetgroup.Group) } if !cache.WaitForCacheSync(ctx.Done(), cacheSyncs...) 
{ if !errors.Is(ctx.Err(), context.Canceled) { - level.Error(e.logger).Log("msg", "endpointslice informer unable to sync cache") + e.logger.Error("endpointslice informer unable to sync cache") } return } @@ -212,13 +212,13 @@ func (e *EndpointSlice) process(ctx context.Context, ch chan<- []*targetgroup.Gr namespace, name, err := cache.SplitMetaNamespaceKey(key) if err != nil { - level.Error(e.logger).Log("msg", "splitting key failed", "key", key) + e.logger.Error("splitting key failed", "key", key) return true } o, exists, err := e.endpointSliceStore.GetByKey(key) if err != nil { - level.Error(e.logger).Log("msg", "getting object from store failed", "key", key) + e.logger.Error("getting object from store failed", "key", key) return true } if !exists { @@ -228,7 +228,7 @@ func (e *EndpointSlice) process(ctx context.Context, ch chan<- []*targetgroup.Gr esa, err := e.getEndpointSliceAdaptor(o) if err != nil { - level.Error(e.logger).Log("msg", "converting to EndpointSlice object failed", "err", err) + e.logger.Error("converting to EndpointSlice object failed", "err", err) return true } @@ -470,7 +470,7 @@ func (e *EndpointSlice) resolvePodRef(ref *apiv1.ObjectReference) *apiv1.Pod { obj, exists, err := e.podStore.Get(p) if err != nil { - level.Error(e.logger).Log("msg", "resolving pod ref failed", "err", err) + e.logger.Error("resolving pod ref failed", "err", err) return nil } if !exists { @@ -495,7 +495,7 @@ func (e *EndpointSlice) addServiceLabels(esa endpointSliceAdaptor, tg *targetgro obj, exists, err := e.serviceStore.Get(svc) if err != nil { - level.Error(e.logger).Log("msg", "retrieving service failed", "err", err) + e.logger.Error("retrieving service failed", "err", err) return } if !exists { diff --git a/discovery/kubernetes/ingress.go b/discovery/kubernetes/ingress.go index 4d91e7a460..1b7847c5c4 100644 --- a/discovery/kubernetes/ingress.go +++ b/discovery/kubernetes/ingress.go @@ -17,10 +17,9 @@ import ( "context" "errors" "fmt" + "log/slog" "strings" - 
"github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" v1 "k8s.io/api/networking/v1" @@ -32,14 +31,14 @@ import ( // Ingress implements discovery of Kubernetes ingress. type Ingress struct { - logger log.Logger + logger *slog.Logger informer cache.SharedInformer store cache.Store queue *workqueue.Type } // NewIngress returns a new ingress discovery. -func NewIngress(l log.Logger, inf cache.SharedInformer, eventCount *prometheus.CounterVec) *Ingress { +func NewIngress(l *slog.Logger, inf cache.SharedInformer, eventCount *prometheus.CounterVec) *Ingress { ingressAddCount := eventCount.WithLabelValues(RoleIngress.String(), MetricLabelRoleAdd) ingressUpdateCount := eventCount.WithLabelValues(RoleIngress.String(), MetricLabelRoleUpdate) ingressDeleteCount := eventCount.WithLabelValues(RoleIngress.String(), MetricLabelRoleDelete) @@ -66,7 +65,7 @@ func NewIngress(l log.Logger, inf cache.SharedInformer, eventCount *prometheus.C }, }) if err != nil { - level.Error(l).Log("msg", "Error adding ingresses event handler.", "err", err) + l.Error("Error adding ingresses event handler.", "err", err) } return s } @@ -86,7 +85,7 @@ func (i *Ingress) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { if !cache.WaitForCacheSync(ctx.Done(), i.informer.HasSynced) { if !errors.Is(ctx.Err(), context.Canceled) { - level.Error(i.logger).Log("msg", "ingress informer unable to sync cache") + i.logger.Error("ingress informer unable to sync cache") } return } @@ -127,7 +126,7 @@ func (i *Ingress) process(ctx context.Context, ch chan<- []*targetgroup.Group) b case *v1.Ingress: ia = newIngressAdaptorFromV1(ingress) default: - level.Error(i.logger).Log("msg", "converting to Ingress object failed", "err", + i.logger.Error("converting to Ingress object failed", "err", fmt.Errorf("received unexpected object: %v", o)) return true } diff --git a/discovery/kubernetes/kubernetes.go 
b/discovery/kubernetes/kubernetes.go index 93ac65d8dc..be1c77c205 100644 --- a/discovery/kubernetes/kubernetes.go +++ b/discovery/kubernetes/kubernetes.go @@ -17,6 +17,7 @@ import ( "context" "errors" "fmt" + "log/slog" "os" "reflect" "strings" @@ -25,11 +26,10 @@ import ( "github.com/prometheus/prometheus/util/strutil" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/prometheus/common/version" apiv1 "k8s.io/api/core/v1" disv1 "k8s.io/api/discovery/v1" @@ -260,7 +260,7 @@ type Discovery struct { sync.RWMutex client kubernetes.Interface role Role - logger log.Logger + logger *slog.Logger namespaceDiscovery *NamespaceDiscovery discoverers []discovery.Discoverer selectors roleSelector @@ -285,14 +285,14 @@ func (d *Discovery) getNamespaces() []string { } // New creates a new Kubernetes discovery for the given role. 
-func New(l log.Logger, metrics discovery.DiscovererMetrics, conf *SDConfig) (*Discovery, error) { +func New(l *slog.Logger, metrics discovery.DiscovererMetrics, conf *SDConfig) (*Discovery, error) { m, ok := metrics.(*kubernetesMetrics) if !ok { return nil, fmt.Errorf("invalid discovery metrics type") } if l == nil { - l = log.NewNopLogger() + l = promslog.NewNopLogger() } var ( kcfg *rest.Config @@ -324,7 +324,7 @@ func New(l log.Logger, metrics discovery.DiscovererMetrics, conf *SDConfig) (*Di ownNamespace = string(ownNamespaceContents) } - level.Info(l).Log("msg", "Using pod service account via in-cluster config") + l.Info("Using pod service account via in-cluster config") default: rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "kubernetes_sd") if err != nil { @@ -446,7 +446,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { go nodeInf.Run(ctx.Done()) } eps := NewEndpointSlice( - log.With(d.logger, "role", "endpointslice"), + d.logger.With("role", "endpointslice"), informer, d.mustNewSharedInformer(slw, &apiv1.Service{}, resyncDisabled), d.mustNewSharedInformer(plw, &apiv1.Pod{}, resyncDisabled), @@ -506,7 +506,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { } eps := NewEndpoints( - log.With(d.logger, "role", "endpoint"), + d.logger.With("role", "endpoint"), d.newEndpointsByNodeInformer(elw), d.mustNewSharedInformer(slw, &apiv1.Service{}, resyncDisabled), d.mustNewSharedInformer(plw, &apiv1.Pod{}, resyncDisabled), @@ -540,7 +540,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { }, } pod := NewPod( - log.With(d.logger, "role", "pod"), + d.logger.With("role", "pod"), d.newPodsByNodeInformer(plw), nodeInformer, d.metrics.eventCount, @@ -564,7 +564,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { }, } svc := NewService( - log.With(d.logger, "role", "service"), + d.logger.With("role", "service"), 
d.mustNewSharedInformer(slw, &apiv1.Service{}, resyncDisabled), d.metrics.eventCount, ) @@ -589,7 +589,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { } informer = d.mustNewSharedInformer(ilw, &networkv1.Ingress{}, resyncDisabled) ingress := NewIngress( - log.With(d.logger, "role", "ingress"), + d.logger.With("role", "ingress"), informer, d.metrics.eventCount, ) @@ -598,11 +598,11 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { } case RoleNode: nodeInformer := d.newNodeInformer(ctx) - node := NewNode(log.With(d.logger, "role", "node"), nodeInformer, d.metrics.eventCount) + node := NewNode(d.logger.With("role", "node"), nodeInformer, d.metrics.eventCount) d.discoverers = append(d.discoverers, node) go node.informer.Run(ctx.Done()) default: - level.Error(d.logger).Log("msg", "unknown Kubernetes discovery kind", "role", d.role) + d.logger.Error("unknown Kubernetes discovery kind", "role", d.role) } var wg sync.WaitGroup diff --git a/discovery/kubernetes/kubernetes_test.go b/discovery/kubernetes/kubernetes_test.go index a026366502..fbbd77c3c3 100644 --- a/discovery/kubernetes/kubernetes_test.go +++ b/discovery/kubernetes/kubernetes_test.go @@ -20,8 +20,8 @@ import ( "testing" "time" - "github.com/go-kit/log" prom_testutil "github.com/prometheus/client_golang/prometheus/testutil" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" @@ -71,7 +71,7 @@ func makeDiscoveryWithVersion(role Role, nsDiscovery NamespaceDiscovery, k8sVer d := &Discovery{ client: clientset, - logger: log.NewNopLogger(), + logger: promslog.NewNopLogger(), role: role, namespaceDiscovery: &nsDiscovery, ownNamespace: "own-ns", diff --git a/discovery/kubernetes/node.go b/discovery/kubernetes/node.go index 74d87e22c4..eecb52ab50 100644 --- a/discovery/kubernetes/node.go +++ b/discovery/kubernetes/node.go @@ -17,13 +17,13 @@ 
import ( "context" "errors" "fmt" + "log/slog" "net" "strconv" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" apiv1 "k8s.io/api/core/v1" "k8s.io/client-go/tools/cache" "k8s.io/client-go/util/workqueue" @@ -38,16 +38,16 @@ const ( // Node discovers Kubernetes nodes. type Node struct { - logger log.Logger + logger *slog.Logger informer cache.SharedInformer store cache.Store queue *workqueue.Type } // NewNode returns a new node discovery. -func NewNode(l log.Logger, inf cache.SharedInformer, eventCount *prometheus.CounterVec) *Node { +func NewNode(l *slog.Logger, inf cache.SharedInformer, eventCount *prometheus.CounterVec) *Node { if l == nil { - l = log.NewNopLogger() + l = promslog.NewNopLogger() } nodeAddCount := eventCount.WithLabelValues(RoleNode.String(), MetricLabelRoleAdd) @@ -76,7 +76,7 @@ func NewNode(l log.Logger, inf cache.SharedInformer, eventCount *prometheus.Coun }, }) if err != nil { - level.Error(l).Log("msg", "Error adding nodes event handler.", "err", err) + l.Error("Error adding nodes event handler.", "err", err) } return n } @@ -96,7 +96,7 @@ func (n *Node) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { if !cache.WaitForCacheSync(ctx.Done(), n.informer.HasSynced) { if !errors.Is(ctx.Err(), context.Canceled) { - level.Error(n.logger).Log("msg", "node informer unable to sync cache") + n.logger.Error("node informer unable to sync cache") } return } @@ -133,7 +133,7 @@ func (n *Node) process(ctx context.Context, ch chan<- []*targetgroup.Group) bool } node, err := convertToNode(o) if err != nil { - level.Error(n.logger).Log("msg", "converting to Node object failed", "err", err) + n.logger.Error("converting to Node object failed", "err", err) return true } send(ctx, ch, n.buildNode(node)) @@ -181,7 +181,7 @@ func (n *Node) buildNode(node *apiv1.Node) *targetgroup.Group { addr, addrMap, err := 
nodeAddress(node) if err != nil { - level.Warn(n.logger).Log("msg", "No node address found", "err", err) + n.logger.Warn("No node address found", "err", err) return nil } addr = net.JoinHostPort(addr, strconv.FormatInt(int64(node.Status.DaemonEndpoints.KubeletEndpoint.Port), 10)) diff --git a/discovery/kubernetes/pod.go b/discovery/kubernetes/pod.go index 02990e415f..73568e51c8 100644 --- a/discovery/kubernetes/pod.go +++ b/discovery/kubernetes/pod.go @@ -17,14 +17,14 @@ import ( "context" "errors" "fmt" + "log/slog" "net" "strconv" "strings" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" apiv1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/tools/cache" @@ -44,14 +44,14 @@ type Pod struct { nodeInf cache.SharedInformer withNodeMetadata bool store cache.Store - logger log.Logger + logger *slog.Logger queue *workqueue.Type } // NewPod creates a new pod discovery. 
-func NewPod(l log.Logger, pods cache.SharedIndexInformer, nodes cache.SharedInformer, eventCount *prometheus.CounterVec) *Pod { +func NewPod(l *slog.Logger, pods cache.SharedIndexInformer, nodes cache.SharedInformer, eventCount *prometheus.CounterVec) *Pod { if l == nil { - l = log.NewNopLogger() + l = promslog.NewNopLogger() } podAddCount := eventCount.WithLabelValues(RolePod.String(), MetricLabelRoleAdd) @@ -81,7 +81,7 @@ func NewPod(l log.Logger, pods cache.SharedIndexInformer, nodes cache.SharedInfo }, }) if err != nil { - level.Error(l).Log("msg", "Error adding pods event handler.", "err", err) + l.Error("Error adding pods event handler.", "err", err) } if p.withNodeMetadata { @@ -100,7 +100,7 @@ func NewPod(l log.Logger, pods cache.SharedIndexInformer, nodes cache.SharedInfo }, }) if err != nil { - level.Error(l).Log("msg", "Error adding pods event handler.", "err", err) + l.Error("Error adding pods event handler.", "err", err) } } @@ -127,7 +127,7 @@ func (p *Pod) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { if !cache.WaitForCacheSync(ctx.Done(), cacheSyncs...) 
{ if !errors.Is(ctx.Err(), context.Canceled) { - level.Error(p.logger).Log("msg", "pod informer unable to sync cache") + p.logger.Error("pod informer unable to sync cache") } return } @@ -164,7 +164,7 @@ func (p *Pod) process(ctx context.Context, ch chan<- []*targetgroup.Group) bool } pod, err := convertToPod(o) if err != nil { - level.Error(p.logger).Log("msg", "converting to Pod object failed", "err", err) + p.logger.Error("converting to Pod object failed", "err", err) return true } send(ctx, ch, p.buildPod(pod)) @@ -246,7 +246,7 @@ func (p *Pod) findPodContainerStatus(statuses *[]apiv1.ContainerStatus, containe func (p *Pod) findPodContainerID(statuses *[]apiv1.ContainerStatus, containerName string) string { cStatus, err := p.findPodContainerStatus(statuses, containerName) if err != nil { - level.Debug(p.logger).Log("msg", "cannot find container ID", "err", err) + p.logger.Debug("cannot find container ID", "err", err) return "" } return cStatus.ContainerID @@ -315,7 +315,7 @@ func (p *Pod) buildPod(pod *apiv1.Pod) *targetgroup.Group { func (p *Pod) enqueuePodsForNode(nodeName string) { pods, err := p.podInf.GetIndexer().ByIndex(nodeIndex, nodeName) if err != nil { - level.Error(p.logger).Log("msg", "Error getting pods for node", "node", nodeName, "err", err) + p.logger.Error("Error getting pods for node", "node", nodeName, "err", err) return } diff --git a/discovery/kubernetes/service.go b/discovery/kubernetes/service.go index 51204a5a1a..e666497c86 100644 --- a/discovery/kubernetes/service.go +++ b/discovery/kubernetes/service.go @@ -17,13 +17,13 @@ import ( "context" "errors" "fmt" + "log/slog" "net" "strconv" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" apiv1 "k8s.io/api/core/v1" "k8s.io/client-go/tools/cache" "k8s.io/client-go/util/workqueue" @@ -33,16 +33,16 @@ import ( // Service implements discovery of Kubernetes 
services. type Service struct { - logger log.Logger + logger *slog.Logger informer cache.SharedInformer store cache.Store queue *workqueue.Type } // NewService returns a new service discovery. -func NewService(l log.Logger, inf cache.SharedInformer, eventCount *prometheus.CounterVec) *Service { +func NewService(l *slog.Logger, inf cache.SharedInformer, eventCount *prometheus.CounterVec) *Service { if l == nil { - l = log.NewNopLogger() + l = promslog.NewNopLogger() } svcAddCount := eventCount.WithLabelValues(RoleService.String(), MetricLabelRoleAdd) @@ -71,7 +71,7 @@ func NewService(l log.Logger, inf cache.SharedInformer, eventCount *prometheus.C }, }) if err != nil { - level.Error(l).Log("msg", "Error adding services event handler.", "err", err) + l.Error("Error adding services event handler.", "err", err) } return s } @@ -91,7 +91,7 @@ func (s *Service) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { if !cache.WaitForCacheSync(ctx.Done(), s.informer.HasSynced) { if !errors.Is(ctx.Err(), context.Canceled) { - level.Error(s.logger).Log("msg", "service informer unable to sync cache") + s.logger.Error("service informer unable to sync cache") } return } @@ -128,7 +128,7 @@ func (s *Service) process(ctx context.Context, ch chan<- []*targetgroup.Group) b } eps, err := convertToService(o) if err != nil { - level.Error(s.logger).Log("msg", "converting to Service object failed", "err", err) + s.logger.Error("converting to Service object failed", "err", err) return true } send(ctx, ch, s.buildService(eps)) diff --git a/discovery/linode/linode.go b/discovery/linode/linode.go index 634a6b1d4b..dfc12417c0 100644 --- a/discovery/linode/linode.go +++ b/discovery/linode/linode.go @@ -17,13 +17,13 @@ import ( "context" "errors" "fmt" + "log/slog" "net" "net/http" "strconv" "strings" "time" - "github.com/go-kit/log" "github.com/linode/linodego" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" @@ -138,7 +138,7 @@ type Discovery struct 
{ } // NewDiscovery returns a new Discovery which periodically refreshes its targets. -func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { +func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { m, ok := metrics.(*linodeMetrics) if !ok { return nil, fmt.Errorf("invalid discovery metrics type") diff --git a/discovery/linode/linode_test.go b/discovery/linode/linode_test.go index 3c10650653..7bcaa05ba4 100644 --- a/discovery/linode/linode_test.go +++ b/discovery/linode/linode_test.go @@ -19,10 +19,10 @@ import ( "net/url" "testing" - "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "github.com/prometheus/prometheus/discovery" @@ -238,7 +238,7 @@ func TestLinodeSDRefresh(t *testing.T) { defer metrics.Unregister() defer refreshMetrics.Unregister() - d, err := NewDiscovery(&cfg, log.NewNopLogger(), metrics) + d, err := NewDiscovery(&cfg, promslog.NewNopLogger(), metrics) require.NoError(t, err) endpoint, err := url.Parse(sdmock.Endpoint()) require.NoError(t, err) diff --git a/discovery/manager.go b/discovery/manager.go index cefa90a866..87e0ecc44b 100644 --- a/discovery/manager.go +++ b/discovery/manager.go @@ -16,14 +16,14 @@ package discovery import ( "context" "fmt" + "log/slog" "reflect" "sync" "time" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" + "github.com/prometheus/common/promslog" "github.com/prometheus/prometheus/discovery/targetgroup" ) @@ -81,9 +81,9 @@ func CreateAndRegisterSDMetrics(reg prometheus.Registerer) (map[string]Discovere } // NewManager is the Discovery Manager constructor. 
-func NewManager(ctx context.Context, logger log.Logger, registerer prometheus.Registerer, sdMetrics map[string]DiscovererMetrics, options ...func(*Manager)) *Manager { +func NewManager(ctx context.Context, logger *slog.Logger, registerer prometheus.Registerer, sdMetrics map[string]DiscovererMetrics, options ...func(*Manager)) *Manager { if logger == nil { - logger = log.NewNopLogger() + logger = promslog.NewNopLogger() } mgr := &Manager{ logger: logger, @@ -104,7 +104,7 @@ func NewManager(ctx context.Context, logger log.Logger, registerer prometheus.Re if metrics, err := NewManagerMetrics(registerer, mgr.name); err == nil { mgr.metrics = metrics } else { - level.Error(logger).Log("msg", "Failed to create discovery manager metrics", "manager", mgr.name, "err", err) + logger.Error("Failed to create discovery manager metrics", "manager", mgr.name, "err", err) return nil } @@ -141,7 +141,7 @@ func HTTPClientOptions(opts ...config.HTTPClientOption) func(*Manager) { // Manager maintains a set of discovery providers and sends each update to a map channel. // Targets are grouped by the target set name. 
type Manager struct { - logger log.Logger + logger *slog.Logger name string httpOpts []config.HTTPClientOption mtx sync.RWMutex @@ -294,7 +294,7 @@ func (m *Manager) StartCustomProvider(ctx context.Context, name string, worker D } func (m *Manager) startProvider(ctx context.Context, p *Provider) { - level.Debug(m.logger).Log("msg", "Starting provider", "provider", p.name, "subs", fmt.Sprintf("%v", p.subs)) + m.logger.Debug("Starting provider", "provider", p.name, "subs", fmt.Sprintf("%v", p.subs)) ctx, cancel := context.WithCancel(ctx) updates := make(chan []*targetgroup.Group) @@ -328,7 +328,7 @@ func (m *Manager) updater(ctx context.Context, p *Provider, updates chan []*targ case tgs, ok := <-updates: m.metrics.ReceivedUpdates.Inc() if !ok { - level.Debug(m.logger).Log("msg", "Discoverer channel closed", "provider", p.name) + m.logger.Debug("Discoverer channel closed", "provider", p.name) // Wait for provider cancellation to ensure targets are cleaned up when expected. <-ctx.Done() return @@ -364,7 +364,7 @@ func (m *Manager) sender() { case m.syncCh <- m.allGroups(): default: m.metrics.DelayedUpdates.Inc() - level.Debug(m.logger).Log("msg", "Discovery receiver's channel was full so will retry the next cycle") + m.logger.Debug("Discovery receiver's channel was full so will retry the next cycle") select { case m.triggerSend <- struct{}{}: default: @@ -458,12 +458,12 @@ func (m *Manager) registerProviders(cfgs Configs, setName string) int { } typ := cfg.Name() d, err := cfg.NewDiscoverer(DiscovererOptions{ - Logger: log.With(m.logger, "discovery", typ, "config", setName), + Logger: m.logger.With("discovery", typ, "config", setName), HTTPClientOptions: m.httpOpts, Metrics: m.sdMetrics[typ], }) if err != nil { - level.Error(m.logger).Log("msg", "Cannot create service discovery", "err", err, "type", typ, "config", setName) + m.logger.Error("Cannot create service discovery", "err", err, "type", typ, "config", setName) failed++ return } diff --git 
a/discovery/manager_test.go b/discovery/manager_test.go index 831cefe514..b882c0b02e 100644 --- a/discovery/manager_test.go +++ b/discovery/manager_test.go @@ -22,10 +22,10 @@ import ( "testing" "time" - "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" client_testutil "github.com/prometheus/client_golang/prometheus/testutil" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "github.com/prometheus/prometheus/discovery/targetgroup" @@ -675,7 +675,7 @@ func TestTargetUpdatesOrder(t *testing.T) { reg := prometheus.NewRegistry() _, sdMetrics := NewTestMetrics(t, reg) - discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics) + discoveryManager := NewManager(ctx, promslog.NewNopLogger(), reg, sdMetrics) require.NotNil(t, discoveryManager) discoveryManager.updatert = 100 * time.Millisecond @@ -791,7 +791,7 @@ func TestTargetSetTargetGroupsPresentOnConfigReload(t *testing.T) { reg := prometheus.NewRegistry() _, sdMetrics := NewTestMetrics(t, reg) - discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics) + discoveryManager := NewManager(ctx, promslog.NewNopLogger(), reg, sdMetrics) require.NotNil(t, discoveryManager) discoveryManager.updatert = 100 * time.Millisecond go discoveryManager.Run() @@ -828,7 +828,7 @@ func TestTargetSetTargetGroupsPresentOnConfigRename(t *testing.T) { reg := prometheus.NewRegistry() _, sdMetrics := NewTestMetrics(t, reg) - discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics) + discoveryManager := NewManager(ctx, promslog.NewNopLogger(), reg, sdMetrics) require.NotNil(t, discoveryManager) discoveryManager.updatert = 100 * time.Millisecond go discoveryManager.Run() @@ -868,7 +868,7 @@ func TestTargetSetTargetGroupsPresentOnConfigDuplicateAndDeleteOriginal(t *testi reg := prometheus.NewRegistry() _, sdMetrics := NewTestMetrics(t, reg) - discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics) + 
discoveryManager := NewManager(ctx, promslog.NewNopLogger(), reg, sdMetrics) require.NotNil(t, discoveryManager) discoveryManager.updatert = 100 * time.Millisecond go discoveryManager.Run() @@ -911,7 +911,7 @@ func TestTargetSetTargetGroupsPresentOnConfigChange(t *testing.T) { reg := prometheus.NewRegistry() _, sdMetrics := NewTestMetrics(t, reg) - discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics) + discoveryManager := NewManager(ctx, promslog.NewNopLogger(), reg, sdMetrics) require.NotNil(t, discoveryManager) discoveryManager.updatert = 100 * time.Millisecond go discoveryManager.Run() @@ -979,7 +979,7 @@ func TestTargetSetRecreatesTargetGroupsOnConfigChange(t *testing.T) { reg := prometheus.NewRegistry() _, sdMetrics := NewTestMetrics(t, reg) - discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics) + discoveryManager := NewManager(ctx, promslog.NewNopLogger(), reg, sdMetrics) require.NotNil(t, discoveryManager) discoveryManager.updatert = 100 * time.Millisecond go discoveryManager.Run() @@ -1023,7 +1023,7 @@ func TestDiscovererConfigs(t *testing.T) { reg := prometheus.NewRegistry() _, sdMetrics := NewTestMetrics(t, reg) - discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics) + discoveryManager := NewManager(ctx, promslog.NewNopLogger(), reg, sdMetrics) require.NotNil(t, discoveryManager) discoveryManager.updatert = 100 * time.Millisecond go discoveryManager.Run() @@ -1060,7 +1060,7 @@ func TestTargetSetRecreatesEmptyStaticConfigs(t *testing.T) { reg := prometheus.NewRegistry() _, sdMetrics := NewTestMetrics(t, reg) - discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics) + discoveryManager := NewManager(ctx, promslog.NewNopLogger(), reg, sdMetrics) require.NotNil(t, discoveryManager) discoveryManager.updatert = 100 * time.Millisecond go discoveryManager.Run() @@ -1141,7 +1141,7 @@ func TestApplyConfigDoesNotModifyStaticTargets(t *testing.T) { reg := prometheus.NewRegistry() _, sdMetrics := 
NewTestMetrics(t, reg) - discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics) + discoveryManager := NewManager(ctx, promslog.NewNopLogger(), reg, sdMetrics) require.NotNil(t, discoveryManager) discoveryManager.updatert = 100 * time.Millisecond go discoveryManager.Run() @@ -1202,7 +1202,7 @@ func TestGaugeFailedConfigs(t *testing.T) { reg := prometheus.NewRegistry() _, sdMetrics := NewTestMetrics(t, reg) - discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics) + discoveryManager := NewManager(ctx, promslog.NewNopLogger(), reg, sdMetrics) require.NotNil(t, discoveryManager) discoveryManager.updatert = 100 * time.Millisecond go discoveryManager.Run() @@ -1454,7 +1454,7 @@ func TestTargetSetTargetGroupsUpdateDuringApplyConfig(t *testing.T) { reg := prometheus.NewRegistry() _, sdMetrics := NewTestMetrics(t, reg) - discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics) + discoveryManager := NewManager(ctx, promslog.NewNopLogger(), reg, sdMetrics) require.NotNil(t, discoveryManager) discoveryManager.updatert = 100 * time.Millisecond go discoveryManager.Run() @@ -1551,7 +1551,7 @@ func TestUnregisterMetrics(t *testing.T) { refreshMetrics, sdMetrics := NewTestMetrics(t, reg) - discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics) + discoveryManager := NewManager(ctx, promslog.NewNopLogger(), reg, sdMetrics) // discoveryManager will be nil if there was an error configuring metrics. require.NotNil(t, discoveryManager) // Unregister all metrics. 
diff --git a/discovery/marathon/marathon.go b/discovery/marathon/marathon.go index 38b47accff..f81a4410eb 100644 --- a/discovery/marathon/marathon.go +++ b/discovery/marathon/marathon.go @@ -19,6 +19,7 @@ import ( "errors" "fmt" "io" + "log/slog" "math/rand" "net" "net/http" @@ -27,7 +28,6 @@ import ( "strings" "time" - "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/prometheus/common/model" @@ -140,7 +140,7 @@ type Discovery struct { } // NewDiscovery returns a new Marathon Discovery. -func NewDiscovery(conf SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { +func NewDiscovery(conf SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { m, ok := metrics.(*marathonMetrics) if !ok { return nil, fmt.Errorf("invalid discovery metrics type") diff --git a/discovery/moby/docker.go b/discovery/moby/docker.go index 68f6fe3ccc..1a732c0502 100644 --- a/discovery/moby/docker.go +++ b/discovery/moby/docker.go @@ -16,6 +16,7 @@ package moby import ( "context" "fmt" + "log/slog" "net" "net/http" "net/url" @@ -28,7 +29,6 @@ import ( "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/network" "github.com/docker/docker/client" - "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/prometheus/common/model" @@ -128,7 +128,7 @@ type DockerDiscovery struct { } // NewDockerDiscovery returns a new DockerDiscovery which periodically refreshes its targets. 
-func NewDockerDiscovery(conf *DockerSDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*DockerDiscovery, error) { +func NewDockerDiscovery(conf *DockerSDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*DockerDiscovery, error) { m, ok := metrics.(*dockerMetrics) if !ok { return nil, fmt.Errorf("invalid discovery metrics type") diff --git a/discovery/moby/docker_test.go b/discovery/moby/docker_test.go index 398393a15a..00e6a3e4f3 100644 --- a/discovery/moby/docker_test.go +++ b/discovery/moby/docker_test.go @@ -19,9 +19,9 @@ import ( "sort" "testing" - "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "gopkg.in/yaml.v2" @@ -48,7 +48,7 @@ host: %s defer metrics.Unregister() defer refreshMetrics.Unregister() - d, err := NewDockerDiscovery(&cfg, log.NewNopLogger(), metrics) + d, err := NewDockerDiscovery(&cfg, promslog.NewNopLogger(), metrics) require.NoError(t, err) ctx := context.Background() @@ -226,7 +226,7 @@ host: %s require.NoError(t, metrics.Register()) defer metrics.Unregister() defer refreshMetrics.Unregister() - d, err := NewDockerDiscovery(&cfg, log.NewNopLogger(), metrics) + d, err := NewDockerDiscovery(&cfg, promslog.NewNopLogger(), metrics) require.NoError(t, err) ctx := context.Background() diff --git a/discovery/moby/dockerswarm.go b/discovery/moby/dockerswarm.go index b0147467d2..9e93e581f3 100644 --- a/discovery/moby/dockerswarm.go +++ b/discovery/moby/dockerswarm.go @@ -16,13 +16,13 @@ package moby import ( "context" "fmt" + "log/slog" "net/http" "net/url" "time" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/client" - "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/prometheus/common/model" @@ -125,7 +125,7 @@ type Discovery struct { } // NewDiscovery returns a new Discovery which 
periodically refreshes its targets. -func NewDiscovery(conf *DockerSwarmSDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { +func NewDiscovery(conf *DockerSwarmSDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { m, ok := metrics.(*dockerswarmMetrics) if !ok { return nil, fmt.Errorf("invalid discovery metrics type") diff --git a/discovery/moby/nodes_test.go b/discovery/moby/nodes_test.go index 4ad1088d1a..973b83c4b6 100644 --- a/discovery/moby/nodes_test.go +++ b/discovery/moby/nodes_test.go @@ -18,9 +18,9 @@ import ( "fmt" "testing" - "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "gopkg.in/yaml.v2" @@ -48,7 +48,7 @@ host: %s defer metrics.Unregister() defer refreshMetrics.Unregister() - d, err := NewDiscovery(&cfg, log.NewNopLogger(), metrics) + d, err := NewDiscovery(&cfg, promslog.NewNopLogger(), metrics) require.NoError(t, err) ctx := context.Background() diff --git a/discovery/moby/services_test.go b/discovery/moby/services_test.go index 47ca69e33a..7a966cfeee 100644 --- a/discovery/moby/services_test.go +++ b/discovery/moby/services_test.go @@ -18,9 +18,9 @@ import ( "fmt" "testing" - "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "gopkg.in/yaml.v2" @@ -48,7 +48,7 @@ host: %s defer metrics.Unregister() defer refreshMetrics.Unregister() - d, err := NewDiscovery(&cfg, log.NewNopLogger(), metrics) + d, err := NewDiscovery(&cfg, promslog.NewNopLogger(), metrics) require.NoError(t, err) ctx := context.Background() @@ -349,7 +349,7 @@ filters: defer metrics.Unregister() defer refreshMetrics.Unregister() - d, err := NewDiscovery(&cfg, log.NewNopLogger(), metrics) + d, err := NewDiscovery(&cfg, 
promslog.NewNopLogger(), metrics) require.NoError(t, err) ctx := context.Background() diff --git a/discovery/moby/tasks_test.go b/discovery/moby/tasks_test.go index ef71bc02f5..59d8831c3b 100644 --- a/discovery/moby/tasks_test.go +++ b/discovery/moby/tasks_test.go @@ -18,9 +18,9 @@ import ( "fmt" "testing" - "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "gopkg.in/yaml.v2" @@ -48,7 +48,7 @@ host: %s defer metrics.Unregister() defer refreshMetrics.Unregister() - d, err := NewDiscovery(&cfg, log.NewNopLogger(), metrics) + d, err := NewDiscovery(&cfg, promslog.NewNopLogger(), metrics) require.NoError(t, err) ctx := context.Background() diff --git a/discovery/nomad/nomad.go b/discovery/nomad/nomad.go index d9c48120ae..1dbd8f1608 100644 --- a/discovery/nomad/nomad.go +++ b/discovery/nomad/nomad.go @@ -17,12 +17,12 @@ import ( "context" "errors" "fmt" + "log/slog" "net" "strconv" "strings" "time" - "github.com/go-kit/log" nomad "github.com/hashicorp/nomad/api" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" @@ -121,7 +121,7 @@ type Discovery struct { } // NewDiscovery returns a new Discovery which periodically refreshes its targets. 
-func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { +func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { m, ok := metrics.(*nomadMetrics) if !ok { return nil, fmt.Errorf("invalid discovery metrics type") diff --git a/discovery/nomad/nomad_test.go b/discovery/nomad/nomad_test.go index 357d4a8e9b..32b087524c 100644 --- a/discovery/nomad/nomad_test.go +++ b/discovery/nomad/nomad_test.go @@ -21,9 +21,9 @@ import ( "net/url" "testing" - "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "github.com/prometheus/prometheus/discovery" @@ -160,7 +160,7 @@ func TestNomadSDRefresh(t *testing.T) { defer metrics.Unregister() defer refreshMetrics.Unregister() - d, err := NewDiscovery(&cfg, log.NewNopLogger(), metrics) + d, err := NewDiscovery(&cfg, promslog.NewNopLogger(), metrics) require.NoError(t, err) tgs, err := d.refresh(context.Background()) diff --git a/discovery/openstack/hypervisor.go b/discovery/openstack/hypervisor.go index 8964da9294..ec127b1861 100644 --- a/discovery/openstack/hypervisor.go +++ b/discovery/openstack/hypervisor.go @@ -16,10 +16,10 @@ package openstack import ( "context" "fmt" + "log/slog" "net" "strconv" - "github.com/go-kit/log" "github.com/gophercloud/gophercloud" "github.com/gophercloud/gophercloud/openstack" "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/hypervisors" @@ -43,14 +43,14 @@ type HypervisorDiscovery struct { provider *gophercloud.ProviderClient authOpts *gophercloud.AuthOptions region string - logger log.Logger + logger *slog.Logger port int availability gophercloud.Availability } // newHypervisorDiscovery returns a new hypervisor discovery. 
func newHypervisorDiscovery(provider *gophercloud.ProviderClient, opts *gophercloud.AuthOptions, - port int, region string, availability gophercloud.Availability, l log.Logger, + port int, region string, availability gophercloud.Availability, l *slog.Logger, ) *HypervisorDiscovery { return &HypervisorDiscovery{ provider: provider, authOpts: opts, diff --git a/discovery/openstack/instance.go b/discovery/openstack/instance.go index 78c669e6f7..2a9e29f2ef 100644 --- a/discovery/openstack/instance.go +++ b/discovery/openstack/instance.go @@ -16,17 +16,17 @@ package openstack import ( "context" "fmt" + "log/slog" "net" "strconv" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/gophercloud/gophercloud" "github.com/gophercloud/gophercloud/openstack" "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips" "github.com/gophercloud/gophercloud/openstack/compute/v2/servers" "github.com/gophercloud/gophercloud/pagination" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/prometheus/prometheus/discovery/targetgroup" "github.com/prometheus/prometheus/util/strutil" @@ -52,7 +52,7 @@ type InstanceDiscovery struct { provider *gophercloud.ProviderClient authOpts *gophercloud.AuthOptions region string - logger log.Logger + logger *slog.Logger port int allTenants bool availability gophercloud.Availability @@ -60,10 +60,10 @@ type InstanceDiscovery struct { // NewInstanceDiscovery returns a new instance discovery. 
func newInstanceDiscovery(provider *gophercloud.ProviderClient, opts *gophercloud.AuthOptions, - port int, region string, allTenants bool, availability gophercloud.Availability, l log.Logger, + port int, region string, allTenants bool, availability gophercloud.Availability, l *slog.Logger, ) *InstanceDiscovery { if l == nil { - l = log.NewNopLogger() + l = promslog.NewNopLogger() } return &InstanceDiscovery{ provider: provider, authOpts: opts, @@ -134,7 +134,7 @@ func (i *InstanceDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, for _, s := range instanceList { if len(s.Addresses) == 0 { - level.Info(i.logger).Log("msg", "Got no IP address", "instance", s.ID) + i.logger.Info("Got no IP address", "instance", s.ID) continue } @@ -151,7 +151,7 @@ func (i *InstanceDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, if !nameOk { flavorID, idOk := s.Flavor["id"].(string) if !idOk { - level.Warn(i.logger).Log("msg", "Invalid type for both flavor original_name and flavor id, expected string") + i.logger.Warn("Invalid type for both flavor original_name and flavor id, expected string") continue } labels[openstackLabelInstanceFlavor] = model.LabelValue(flavorID) @@ -171,22 +171,22 @@ func (i *InstanceDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, for pool, address := range s.Addresses { md, ok := address.([]interface{}) if !ok { - level.Warn(i.logger).Log("msg", "Invalid type for address, expected array") + i.logger.Warn("Invalid type for address, expected array") continue } if len(md) == 0 { - level.Debug(i.logger).Log("msg", "Got no IP address", "instance", s.ID) + i.logger.Debug("Got no IP address", "instance", s.ID) continue } for _, address := range md { md1, ok := address.(map[string]interface{}) if !ok { - level.Warn(i.logger).Log("msg", "Invalid type for address, expected dict") + i.logger.Warn("Invalid type for address, expected dict") continue } addr, ok := md1["addr"].(string) if !ok { - level.Warn(i.logger).Log("msg", 
"Invalid type for address, expected string") + i.logger.Warn("Invalid type for address, expected string") continue } if _, ok := floatingIPPresent[addr]; ok { diff --git a/discovery/openstack/openstack.go b/discovery/openstack/openstack.go index c98f78788d..fa7e0cce90 100644 --- a/discovery/openstack/openstack.go +++ b/discovery/openstack/openstack.go @@ -17,10 +17,10 @@ import ( "context" "errors" "fmt" + "log/slog" "net/http" "time" - "github.com/go-kit/log" "github.com/gophercloud/gophercloud" "github.com/gophercloud/gophercloud/openstack" "github.com/mwitkow/go-conntrack" @@ -142,7 +142,7 @@ type refresher interface { } // NewDiscovery returns a new OpenStack Discoverer which periodically refreshes its targets. -func NewDiscovery(conf *SDConfig, l log.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) { +func NewDiscovery(conf *SDConfig, l *slog.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) { m, ok := metrics.(*openstackMetrics) if !ok { return nil, fmt.Errorf("invalid discovery metrics type") @@ -163,7 +163,7 @@ func NewDiscovery(conf *SDConfig, l log.Logger, metrics discovery.DiscovererMetr ), nil } -func newRefresher(conf *SDConfig, l log.Logger) (refresher, error) { +func newRefresher(conf *SDConfig, l *slog.Logger) (refresher, error) { var opts gophercloud.AuthOptions if conf.IdentityEndpoint == "" { var err error diff --git a/discovery/ovhcloud/dedicated_server.go b/discovery/ovhcloud/dedicated_server.go index a70857a08b..15bb9809c9 100644 --- a/discovery/ovhcloud/dedicated_server.go +++ b/discovery/ovhcloud/dedicated_server.go @@ -16,13 +16,12 @@ package ovhcloud import ( "context" "fmt" + "log/slog" "net/netip" "net/url" "path" "strconv" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/ovh/go-ovh/ovh" "github.com/prometheus/common/model" @@ -55,10 +54,10 @@ type dedicatedServer struct { type dedicatedServerDiscovery struct { *refresh.Discovery config *SDConfig - logger log.Logger + 
logger *slog.Logger } -func newDedicatedServerDiscovery(conf *SDConfig, logger log.Logger) *dedicatedServerDiscovery { +func newDedicatedServerDiscovery(conf *SDConfig, logger *slog.Logger) *dedicatedServerDiscovery { return &dedicatedServerDiscovery{config: conf, logger: logger} } @@ -115,10 +114,7 @@ func (d *dedicatedServerDiscovery) refresh(context.Context) ([]*targetgroup.Grou for _, dedicatedServerName := range dedicatedServerList { dedicatedServer, err := getDedicatedServerDetails(client, dedicatedServerName) if err != nil { - err := level.Warn(d.logger).Log("msg", fmt.Sprintf("%s: Could not get details of %s", d.getSource(), dedicatedServerName), "err", err.Error()) - if err != nil { - return nil, err - } + d.logger.Warn(fmt.Sprintf("%s: Could not get details of %s", d.getSource(), dedicatedServerName), "err", err.Error()) continue } dedicatedServerDetailedList = append(dedicatedServerDetailedList, *dedicatedServer) diff --git a/discovery/ovhcloud/dedicated_server_test.go b/discovery/ovhcloud/dedicated_server_test.go index 52311bcc87..f9dbd6af9c 100644 --- a/discovery/ovhcloud/dedicated_server_test.go +++ b/discovery/ovhcloud/dedicated_server_test.go @@ -21,8 +21,8 @@ import ( "os" "testing" - "github.com/go-kit/log" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "gopkg.in/yaml.v2" ) @@ -41,7 +41,7 @@ application_secret: %s consumer_key: %s`, mock.URL, ovhcloudApplicationKeyTest, ovhcloudApplicationSecretTest, ovhcloudConsumerKeyTest) require.NoError(t, yaml.UnmarshalStrict([]byte(cfgString), &cfg)) - d, err := newRefresher(&cfg, log.NewNopLogger()) + d, err := newRefresher(&cfg, promslog.NewNopLogger()) require.NoError(t, err) ctx := context.Background() targetGroups, err := d.refresh(ctx) diff --git a/discovery/ovhcloud/ovhcloud.go b/discovery/ovhcloud/ovhcloud.go index 988b4482f2..08ed70296b 100644 --- a/discovery/ovhcloud/ovhcloud.go +++ b/discovery/ovhcloud/ovhcloud.go @@ -17,10 
+17,10 @@ import ( "context" "errors" "fmt" + "log/slog" "net/netip" "time" - "github.com/go-kit/log" "github.com/ovh/go-ovh/ovh" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" @@ -137,7 +137,7 @@ func parseIPList(ipList []string) ([]netip.Addr, error) { return ipAddresses, nil } -func newRefresher(conf *SDConfig, logger log.Logger) (refresher, error) { +func newRefresher(conf *SDConfig, logger *slog.Logger) (refresher, error) { switch conf.Service { case "vps": return newVpsDiscovery(conf, logger), nil @@ -148,7 +148,7 @@ func newRefresher(conf *SDConfig, logger log.Logger) (refresher, error) { } // NewDiscovery returns a new OVHcloud Discoverer which periodically refreshes its targets. -func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) { +func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) { m, ok := metrics.(*ovhcloudMetrics) if !ok { return nil, fmt.Errorf("invalid discovery metrics type") diff --git a/discovery/ovhcloud/ovhcloud_test.go b/discovery/ovhcloud/ovhcloud_test.go index 9c95bf90e6..84a35af3ad 100644 --- a/discovery/ovhcloud/ovhcloud_test.go +++ b/discovery/ovhcloud/ovhcloud_test.go @@ -20,11 +20,11 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "gopkg.in/yaml.v2" "github.com/prometheus/prometheus/discovery" - "github.com/prometheus/prometheus/util/testutil" ) var ( @@ -121,7 +121,7 @@ func TestParseIPs(t *testing.T) { func TestDiscoverer(t *testing.T) { conf, _ := getMockConf("vps") - logger := testutil.NewLogger(t) + logger := promslog.NewNopLogger() reg := prometheus.NewRegistry() refreshMetrics := discovery.NewRefreshMetrics(reg) diff --git a/discovery/ovhcloud/vps.go b/discovery/ovhcloud/vps.go index 58ceeabd87..7050f826a5 100644 --- 
a/discovery/ovhcloud/vps.go +++ b/discovery/ovhcloud/vps.go @@ -16,13 +16,12 @@ package ovhcloud import ( "context" "fmt" + "log/slog" "net/netip" "net/url" "path" "strconv" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/ovh/go-ovh/ovh" "github.com/prometheus/common/model" @@ -68,10 +67,10 @@ type virtualPrivateServer struct { type vpsDiscovery struct { *refresh.Discovery config *SDConfig - logger log.Logger + logger *slog.Logger } -func newVpsDiscovery(conf *SDConfig, logger log.Logger) *vpsDiscovery { +func newVpsDiscovery(conf *SDConfig, logger *slog.Logger) *vpsDiscovery { return &vpsDiscovery{config: conf, logger: logger} } @@ -133,10 +132,7 @@ func (d *vpsDiscovery) refresh(context.Context) ([]*targetgroup.Group, error) { for _, vpsName := range vpsList { vpsDetailed, err := getVpsDetails(client, vpsName) if err != nil { - err := level.Warn(d.logger).Log("msg", fmt.Sprintf("%s: Could not get details of %s", d.getSource(), vpsName), "err", err.Error()) - if err != nil { - return nil, err - } + d.logger.Warn(fmt.Sprintf("%s: Could not get details of %s", d.getSource(), vpsName), "err", err.Error()) continue } vpsDetailedList = append(vpsDetailedList, *vpsDetailed) diff --git a/discovery/ovhcloud/vps_test.go b/discovery/ovhcloud/vps_test.go index 2d2d6dcd21..00d59da7f0 100644 --- a/discovery/ovhcloud/vps_test.go +++ b/discovery/ovhcloud/vps_test.go @@ -23,8 +23,8 @@ import ( yaml "gopkg.in/yaml.v2" - "github.com/go-kit/log" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" ) @@ -43,7 +43,7 @@ consumer_key: %s`, mock.URL, ovhcloudApplicationKeyTest, ovhcloudApplicationSecr require.NoError(t, yaml.UnmarshalStrict([]byte(cfgString), &cfg)) - d, err := newRefresher(&cfg, log.NewNopLogger()) + d, err := newRefresher(&cfg, promslog.NewNopLogger()) require.NoError(t, err) ctx := context.Background() targetGroups, err := d.refresh(ctx) diff --git a/discovery/puppetdb/puppetdb.go 
b/discovery/puppetdb/puppetdb.go index 8f89acbf93..6122a76da7 100644 --- a/discovery/puppetdb/puppetdb.go +++ b/discovery/puppetdb/puppetdb.go @@ -19,6 +19,7 @@ import ( "encoding/json" "fmt" "io" + "log/slog" "net" "net/http" "net/url" @@ -27,11 +28,11 @@ import ( "strings" "time" - "github.com/go-kit/log" "github.com/grafana/regexp" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/prometheus/common/version" "github.com/prometheus/prometheus/discovery" @@ -138,14 +139,14 @@ type Discovery struct { } // NewDiscovery returns a new PuppetDB discovery for the given config. -func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { +func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { m, ok := metrics.(*puppetdbMetrics) if !ok { return nil, fmt.Errorf("invalid discovery metrics type") } if logger == nil { - logger = log.NewNopLogger() + logger = promslog.NewNopLogger() } client, err := config.NewClientFromConfig(conf.HTTPClientConfig, "http") diff --git a/discovery/puppetdb/puppetdb_test.go b/discovery/puppetdb/puppetdb_test.go index bf9c7b215e..4585b78223 100644 --- a/discovery/puppetdb/puppetdb_test.go +++ b/discovery/puppetdb/puppetdb_test.go @@ -22,10 +22,10 @@ import ( "testing" "time" - "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "github.com/prometheus/prometheus/discovery" @@ -70,7 +70,7 @@ func TestPuppetSlashInURL(t *testing.T) { metrics := cfg.NewDiscovererMetrics(reg, refreshMetrics) require.NoError(t, metrics.Register()) - d, err := NewDiscovery(&cfg, log.NewNopLogger(), metrics) + d, err := NewDiscovery(&cfg, promslog.NewNopLogger(), 
metrics) require.NoError(t, err) require.Equal(t, apiURL, d.url) @@ -94,7 +94,7 @@ func TestPuppetDBRefresh(t *testing.T) { metrics := cfg.NewDiscovererMetrics(reg, refreshMetrics) require.NoError(t, metrics.Register()) - d, err := NewDiscovery(&cfg, log.NewNopLogger(), metrics) + d, err := NewDiscovery(&cfg, promslog.NewNopLogger(), metrics) require.NoError(t, err) ctx := context.Background() @@ -142,7 +142,7 @@ func TestPuppetDBRefreshWithParameters(t *testing.T) { metrics := cfg.NewDiscovererMetrics(reg, refreshMetrics) require.NoError(t, metrics.Register()) - d, err := NewDiscovery(&cfg, log.NewNopLogger(), metrics) + d, err := NewDiscovery(&cfg, promslog.NewNopLogger(), metrics) require.NoError(t, err) ctx := context.Background() @@ -201,7 +201,7 @@ func TestPuppetDBInvalidCode(t *testing.T) { metrics := cfg.NewDiscovererMetrics(reg, refreshMetrics) require.NoError(t, metrics.Register()) - d, err := NewDiscovery(&cfg, log.NewNopLogger(), metrics) + d, err := NewDiscovery(&cfg, promslog.NewNopLogger(), metrics) require.NoError(t, err) ctx := context.Background() @@ -229,7 +229,7 @@ func TestPuppetDBInvalidFormat(t *testing.T) { metrics := cfg.NewDiscovererMetrics(reg, refreshMetrics) require.NoError(t, metrics.Register()) - d, err := NewDiscovery(&cfg, log.NewNopLogger(), metrics) + d, err := NewDiscovery(&cfg, promslog.NewNopLogger(), metrics) require.NoError(t, err) ctx := context.Background() diff --git a/discovery/refresh/refresh.go b/discovery/refresh/refresh.go index f037a90cff..31646c0e4c 100644 --- a/discovery/refresh/refresh.go +++ b/discovery/refresh/refresh.go @@ -16,17 +16,17 @@ package refresh import ( "context" "errors" + "log/slog" "time" - "github.com/go-kit/log" - "github.com/go-kit/log/level" + "github.com/prometheus/common/promslog" "github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/discovery/targetgroup" ) type Options struct { - Logger log.Logger + Logger *slog.Logger Mech string Interval time.Duration RefreshF 
func(ctx context.Context) ([]*targetgroup.Group, error) @@ -35,7 +35,7 @@ type Options struct { // Discovery implements the Discoverer interface. type Discovery struct { - logger log.Logger + logger *slog.Logger interval time.Duration refreshf func(ctx context.Context) ([]*targetgroup.Group, error) metrics *discovery.RefreshMetrics @@ -45,9 +45,9 @@ type Discovery struct { func NewDiscovery(opts Options) *Discovery { m := opts.MetricsInstantiator.Instantiate(opts.Mech) - var logger log.Logger + var logger *slog.Logger if opts.Logger == nil { - logger = log.NewNopLogger() + logger = promslog.NewNopLogger() } else { logger = opts.Logger } @@ -68,7 +68,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { tgs, err := d.refresh(ctx) if err != nil { if !errors.Is(ctx.Err(), context.Canceled) { - level.Error(d.logger).Log("msg", "Unable to refresh target groups", "err", err.Error()) + d.logger.Error("Unable to refresh target groups", "err", err.Error()) } } else { select { @@ -87,7 +87,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { tgs, err := d.refresh(ctx) if err != nil { if !errors.Is(ctx.Err(), context.Canceled) { - level.Error(d.logger).Log("msg", "Unable to refresh target groups", "err", err.Error()) + d.logger.Error("Unable to refresh target groups", "err", err.Error()) } continue } diff --git a/discovery/scaleway/scaleway.go b/discovery/scaleway/scaleway.go index f8e1a83f5e..670e439c4f 100644 --- a/discovery/scaleway/scaleway.go +++ b/discovery/scaleway/scaleway.go @@ -17,12 +17,12 @@ import ( "context" "errors" "fmt" + "log/slog" "net/http" "os" "strings" "time" - "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/prometheus/common/model" @@ -185,7 +185,7 @@ func init() { // the Discoverer interface. 
type Discovery struct{} -func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) { +func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) { m, ok := metrics.(*scalewayMetrics) if !ok { return nil, fmt.Errorf("invalid discovery metrics type") diff --git a/discovery/triton/triton.go b/discovery/triton/triton.go index 675149f2a3..7b3b18f471 100644 --- a/discovery/triton/triton.go +++ b/discovery/triton/triton.go @@ -19,12 +19,12 @@ import ( "errors" "fmt" "io" + "log/slog" "net/http" "net/url" "strings" "time" - "github.com/go-kit/log" "github.com/mwitkow/go-conntrack" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" @@ -146,7 +146,7 @@ type Discovery struct { } // New returns a new Discovery which periodically refreshes its targets. -func New(logger log.Logger, conf *SDConfig, metrics discovery.DiscovererMetrics) (*Discovery, error) { +func New(logger *slog.Logger, conf *SDConfig, metrics discovery.DiscovererMetrics) (*Discovery, error) { m, ok := metrics.(*tritonMetrics) if !ok { return nil, fmt.Errorf("invalid discovery metrics type") diff --git a/discovery/uyuni/uyuni.go b/discovery/uyuni/uyuni.go index 2ab3396951..de806895d7 100644 --- a/discovery/uyuni/uyuni.go +++ b/discovery/uyuni/uyuni.go @@ -17,6 +17,7 @@ import ( "context" "errors" "fmt" + "log/slog" "net/http" "net/url" "path" @@ -24,7 +25,6 @@ import ( "strings" "time" - "github.com/go-kit/log" "github.com/kolo/xmlrpc" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" @@ -109,7 +109,7 @@ type Discovery struct { entitlement string separator string interval time.Duration - logger log.Logger + logger *slog.Logger } // NewDiscovererMetrics implements discovery.Config. @@ -212,7 +212,7 @@ func getEndpointInfoForSystems( } // NewDiscovery returns a uyuni discovery for the given configuration. 
-func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { +func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { m, ok := metrics.(*uyuniMetrics) if !ok { return nil, fmt.Errorf("invalid discovery metrics type") diff --git a/discovery/vultr/vultr.go b/discovery/vultr/vultr.go index aaa9c64e47..f82b22168a 100644 --- a/discovery/vultr/vultr.go +++ b/discovery/vultr/vultr.go @@ -16,13 +16,13 @@ package vultr import ( "context" "fmt" + "log/slog" "net" "net/http" "strconv" "strings" "time" - "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/prometheus/common/model" @@ -114,7 +114,7 @@ type Discovery struct { } // NewDiscovery returns a new Discovery which periodically refreshes its targets. -func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { +func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { m, ok := metrics.(*vultrMetrics) if !ok { return nil, fmt.Errorf("invalid discovery metrics type") diff --git a/discovery/vultr/vultr_test.go b/discovery/vultr/vultr_test.go index 2f12a35529..00ef21e38c 100644 --- a/discovery/vultr/vultr_test.go +++ b/discovery/vultr/vultr_test.go @@ -19,9 +19,9 @@ import ( "net/url" "testing" - "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "github.com/prometheus/prometheus/discovery" @@ -57,7 +57,7 @@ func TestVultrSDRefresh(t *testing.T) { defer metrics.Unregister() defer refreshMetrics.Unregister() - d, err := NewDiscovery(&cfg, log.NewNopLogger(), metrics) + d, err := NewDiscovery(&cfg, promslog.NewNopLogger(), metrics) require.NoError(t, err) endpoint, err := url.Parse(sdMock.Mock.Endpoint()) 
require.NoError(t, err) diff --git a/discovery/xds/kuma.go b/discovery/xds/kuma.go index d1d540aaf4..55b3d628e5 100644 --- a/discovery/xds/kuma.go +++ b/discovery/xds/kuma.go @@ -15,14 +15,14 @@ package xds import ( "fmt" + "log/slog" "net/url" "time" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "google.golang.org/protobuf/types/known/anypb" "github.com/prometheus/prometheus/discovery" @@ -99,7 +99,7 @@ func (c *KumaSDConfig) SetDirectory(dir string) { func (c *KumaSDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) { logger := opts.Logger if logger == nil { - logger = log.NewNopLogger() + logger = promslog.NewNopLogger() } return NewKumaHTTPDiscovery(c, logger, opts.Metrics) @@ -158,7 +158,7 @@ func kumaMadsV1ResourceParser(resources []*anypb.Any, typeURL string) ([]model.L return targets, nil } -func NewKumaHTTPDiscovery(conf *KumaSDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (discovery.Discoverer, error) { +func NewKumaHTTPDiscovery(conf *KumaSDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (discovery.Discoverer, error) { m, ok := metrics.(*xdsMetrics) if !ok { return nil, fmt.Errorf("invalid discovery metrics type") @@ -170,7 +170,7 @@ func NewKumaHTTPDiscovery(conf *KumaSDConfig, logger log.Logger, metrics discove var err error clientID, err = osutil.GetFQDN() if err != nil { - level.Debug(logger).Log("msg", "error getting FQDN", "err", err) + logger.Debug("error getting FQDN", "err", err) clientID = "prometheus" } } diff --git a/discovery/xds/kuma_mads.pb.go b/discovery/xds/kuma_mads.pb.go index b1079bf23f..210a5343a4 100644 --- a/discovery/xds/kuma_mads.pb.go +++ b/discovery/xds/kuma_mads.pb.go @@ -23,13 +23,14 @@ package xds import ( context "context" + reflect "reflect" + sync "sync" + v3 
"github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" _ "github.com/envoyproxy/protoc-gen-validate/validate" _ "google.golang.org/genproto/googleapis/api/annotations" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" ) const ( diff --git a/discovery/xds/xds.go b/discovery/xds/xds.go index 8191d6be1a..db55a2b6f7 100644 --- a/discovery/xds/xds.go +++ b/discovery/xds/xds.go @@ -15,11 +15,10 @@ package xds import ( "context" + "log/slog" "time" v3 "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/prometheus/common/config" "github.com/prometheus/common/model" "google.golang.org/protobuf/encoding/protojson" @@ -104,7 +103,7 @@ type fetchDiscovery struct { refreshInterval time.Duration parseResources resourceParser - logger log.Logger + logger *slog.Logger metrics *xdsMetrics } @@ -140,7 +139,7 @@ func (d *fetchDiscovery) poll(ctx context.Context, ch chan<- []*targetgroup.Grou } if err != nil { - level.Error(d.logger).Log("msg", "error parsing resources", "err", err) + d.logger.Error("error parsing resources", "err", err) d.metrics.fetchFailuresCount.Inc() return } @@ -153,12 +152,12 @@ func (d *fetchDiscovery) poll(ctx context.Context, ch chan<- []*targetgroup.Grou parsedTargets, err := d.parseResources(response.Resources, response.TypeUrl) if err != nil { - level.Error(d.logger).Log("msg", "error parsing resources", "err", err) + d.logger.Error("error parsing resources", "err", err) d.metrics.fetchFailuresCount.Inc() return } - level.Debug(d.logger).Log("msg", "Updated to version", "version", response.VersionInfo, "targets", len(parsedTargets)) + d.logger.Debug("Updated to version", "version", response.VersionInfo, "targets", len(parsedTargets)) select { case <-ctx.Done(): diff --git a/discovery/xds/xds_test.go b/discovery/xds/xds_test.go index 7cce021c5f..db10adc1a2 
100644 --- a/discovery/xds/xds_test.go +++ b/discovery/xds/xds_test.go @@ -22,9 +22,9 @@ import ( "time" v3 "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" - "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "go.uber.org/goleak" "google.golang.org/protobuf/types/known/anypb" @@ -90,7 +90,7 @@ func constantResourceParser(targets []model.LabelSet, err error) resourceParser } } -var nopLogger = log.NewNopLogger() +var nopLogger = promslog.NewNopLogger() type testResourceClient struct { resourceTypeURL string diff --git a/discovery/zookeeper/zookeeper.go b/discovery/zookeeper/zookeeper.go index 92904dd71c..a1cfe3d055 100644 --- a/discovery/zookeeper/zookeeper.go +++ b/discovery/zookeeper/zookeeper.go @@ -18,15 +18,16 @@ import ( "encoding/json" "errors" "fmt" + "log/slog" "net" "strconv" "strings" "time" - "github.com/go-kit/log" "github.com/go-zookeeper/zk" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/discovery/targetgroup" @@ -146,16 +147,16 @@ type Discovery struct { treeCaches []*treecache.ZookeeperTreeCache parse func(data []byte, path string) (model.LabelSet, error) - logger log.Logger + logger *slog.Logger } // NewNerveDiscovery returns a new Discovery for the given Nerve config. -func NewNerveDiscovery(conf *NerveSDConfig, logger log.Logger) (*Discovery, error) { +func NewNerveDiscovery(conf *NerveSDConfig, logger *slog.Logger) (*Discovery, error) { return NewDiscovery(conf.Servers, time.Duration(conf.Timeout), conf.Paths, logger, parseNerveMember) } // NewServersetDiscovery returns a new Discovery for the given serverset config. 
-func NewServersetDiscovery(conf *ServersetSDConfig, logger log.Logger) (*Discovery, error) { +func NewServersetDiscovery(conf *ServersetSDConfig, logger *slog.Logger) (*Discovery, error) { return NewDiscovery(conf.Servers, time.Duration(conf.Timeout), conf.Paths, logger, parseServersetMember) } @@ -165,11 +166,11 @@ func NewDiscovery( srvs []string, timeout time.Duration, paths []string, - logger log.Logger, + logger *slog.Logger, pf func(data []byte, path string) (model.LabelSet, error), ) (*Discovery, error) { if logger == nil { - logger = log.NewNopLogger() + logger = promslog.NewNopLogger() } conn, _, err := zk.Connect( diff --git a/documentation/examples/custom-sd/adapter-usage/main.go b/documentation/examples/custom-sd/adapter-usage/main.go index 8ccbafe6f1..128132a8d2 100644 --- a/documentation/examples/custom-sd/adapter-usage/main.go +++ b/documentation/examples/custom-sd/adapter-usage/main.go @@ -18,6 +18,7 @@ import ( "encoding/json" "fmt" "io" + "log/slog" "net" "net/http" "os" @@ -26,10 +27,9 @@ import ( "time" "github.com/alecthomas/kingpin/v2" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" prom_discovery "github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/discovery/targetgroup" @@ -41,7 +41,7 @@ var ( a = kingpin.New("sd adapter usage", "Tool to generate file_sd target files for unimplemented SD mechanisms.") outputFile = a.Flag("output.file", "Output file for file_sd compatible file.").Default("custom_sd.json").String() listenAddress = a.Flag("listen.address", "The address the Consul HTTP API is listening on for requests.").Default("localhost:8500").String() - logger log.Logger + logger *slog.Logger // addressLabel is the name for the label containing a target's address. 
addressLabel = model.MetaLabelPrefix + "consul_address" @@ -90,7 +90,7 @@ type discovery struct { address string refreshInterval int tagSeparator string - logger log.Logger + logger *slog.Logger oldSourceList map[string]bool } @@ -164,7 +164,7 @@ func (d *discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { var srvs map[string][]string resp, err := http.Get(fmt.Sprintf("http://%s/v1/catalog/services", d.address)) if err != nil { - level.Error(d.logger).Log("msg", "Error getting services list", "err", err) + d.logger.Error("Error getting services list", "err", err) time.Sleep(time.Duration(d.refreshInterval) * time.Second) continue } @@ -173,7 +173,7 @@ func (d *discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { io.Copy(io.Discard, resp.Body) resp.Body.Close() if err != nil { - level.Error(d.logger).Log("msg", "Error reading services list", "err", err) + d.logger.Error("Error reading services list", "err", err) time.Sleep(time.Duration(d.refreshInterval) * time.Second) continue } @@ -181,7 +181,7 @@ func (d *discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { err = json.Unmarshal(b, &srvs) resp.Body.Close() if err != nil { - level.Error(d.logger).Log("msg", "Error parsing services list", "err", err) + d.logger.Error("Error parsing services list", "err", err) time.Sleep(time.Duration(d.refreshInterval) * time.Second) continue } @@ -200,13 +200,13 @@ func (d *discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { } resp, err := http.Get(fmt.Sprintf("http://%s/v1/catalog/service/%s", d.address, name)) if err != nil { - level.Error(d.logger).Log("msg", "Error getting services nodes", "service", name, "err", err) + d.logger.Error("Error getting services nodes", "service", name, "err", err) break } tg, err := d.parseServiceNodes(resp, name) if err != nil { - level.Error(d.logger).Log("msg", "Error parsing services nodes", "service", name, "err", err) + d.logger.Error("Error parsing services nodes", 
"service", name, "err", err) break } tgs = append(tgs, tg) @@ -254,8 +254,7 @@ func main() { fmt.Println("err: ", err) return } - logger = log.NewSyncLogger(log.NewLogfmtLogger(os.Stdout)) - logger = log.With(logger, "ts", log.DefaultTimestampUTC, "caller", log.DefaultCaller) + logger = promslog.New(&promslog.Config{}) ctx := context.Background() @@ -272,7 +271,7 @@ func main() { } if err != nil { - level.Error(logger).Log("msg", "failed to create discovery metrics", "err", err) + logger.Error("failed to create discovery metrics", "err", err) os.Exit(1) } @@ -280,7 +279,7 @@ func main() { refreshMetrics := prom_discovery.NewRefreshMetrics(reg) metrics, err := prom_discovery.RegisterSDMetrics(reg, refreshMetrics) if err != nil { - level.Error(logger).Log("msg", "failed to register service discovery metrics", "err", err) + logger.Error("failed to register service discovery metrics", "err", err) os.Exit(1) } diff --git a/documentation/examples/custom-sd/adapter/adapter.go b/documentation/examples/custom-sd/adapter/adapter.go index dcf5a2b78c..b242c4eaa0 100644 --- a/documentation/examples/custom-sd/adapter/adapter.go +++ b/documentation/examples/custom-sd/adapter/adapter.go @@ -18,13 +18,12 @@ import ( "context" "encoding/json" "fmt" + "log/slog" "os" "path/filepath" "reflect" "sort" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" @@ -55,7 +54,7 @@ type Adapter struct { manager *discovery.Manager output string name string - logger log.Logger + logger *slog.Logger } func mapToArray(m map[string]*customSD) []customSD { @@ -106,7 +105,7 @@ func (a *Adapter) refreshTargetGroups(allTargetGroups map[string][]*targetgroup. 
a.groups = tempGroups err := a.writeOutput() if err != nil { - level.Error(log.With(a.logger, "component", "sd-adapter")).Log("err", err) + a.logger.With("component", "sd-adapter").Error("failed to write output", "err", err) } } } @@ -163,7 +162,7 @@ func (a *Adapter) Run() { } // NewAdapter creates a new instance of Adapter. -func NewAdapter(ctx context.Context, file, name string, d discovery.Discoverer, logger log.Logger, sdMetrics map[string]discovery.DiscovererMetrics, registerer prometheus.Registerer) *Adapter { +func NewAdapter(ctx context.Context, file, name string, d discovery.Discoverer, logger *slog.Logger, sdMetrics map[string]discovery.DiscovererMetrics, registerer prometheus.Registerer) *Adapter { return &Adapter{ ctx: ctx, disc: d, diff --git a/documentation/examples/remote_storage/go.mod b/documentation/examples/remote_storage/go.mod index a1be5c9b4e..0aad437588 100644 --- a/documentation/examples/remote_storage/go.mod +++ b/documentation/examples/remote_storage/go.mod @@ -4,7 +4,6 @@ go 1.22.0 require ( github.com/alecthomas/kingpin/v2 v2.4.0 - github.com/go-kit/log v0.2.1 github.com/gogo/protobuf v1.3.2 github.com/golang/snappy v0.0.4 github.com/influxdata/influxdb v1.11.6 @@ -26,6 +25,7 @@ require ( github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/dennwc/varint v1.0.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect + github.com/go-kit/log v0.2.1 // indirect github.com/go-logfmt/logfmt v0.6.0 // indirect github.com/go-logr/logr v1.4.1 // indirect github.com/go-logr/stdr v1.2.2 // indirect diff --git a/documentation/examples/remote_storage/remote_storage_adapter/graphite/client.go b/documentation/examples/remote_storage/remote_storage_adapter/graphite/client.go index 36242a8f4d..b02560dbab 100644 --- a/documentation/examples/remote_storage/remote_storage_adapter/graphite/client.go +++ b/documentation/examples/remote_storage/remote_storage_adapter/graphite/client.go @@ -16,19 +16,19 @@ package 
graphite import ( "bytes" "fmt" + "log/slog" "math" "net" "sort" "time" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" ) // Client allows sending batches of Prometheus samples to Graphite. type Client struct { - logger log.Logger + logger *slog.Logger address string transport string @@ -37,9 +37,9 @@ type Client struct { } // NewClient creates a new Client. -func NewClient(logger log.Logger, address, transport string, timeout time.Duration, prefix string) *Client { +func NewClient(logger *slog.Logger, address, transport string, timeout time.Duration, prefix string) *Client { if logger == nil { - logger = log.NewNopLogger() + logger = promslog.NewNopLogger() } return &Client{ logger: logger, @@ -93,7 +93,7 @@ func (c *Client) Write(samples model.Samples) error { t := float64(s.Timestamp.UnixNano()) / 1e9 v := float64(s.Value) if math.IsNaN(v) || math.IsInf(v, 0) { - level.Debug(c.logger).Log("msg", "Cannot send value to Graphite, skipping sample", "value", v, "sample", s) + c.logger.Debug("Cannot send value to Graphite, skipping sample", "value", v, "sample", s) continue } fmt.Fprintf(&buf, "%s %f %f\n", k, v, t) diff --git a/documentation/examples/remote_storage/remote_storage_adapter/influxdb/client.go b/documentation/examples/remote_storage/remote_storage_adapter/influxdb/client.go index e84ed9e129..6ae40f8173 100644 --- a/documentation/examples/remote_storage/remote_storage_adapter/influxdb/client.go +++ b/documentation/examples/remote_storage/remote_storage_adapter/influxdb/client.go @@ -17,22 +17,22 @@ import ( "encoding/json" "errors" "fmt" + "log/slog" "math" "os" "strings" - "github.com/go-kit/log" - "github.com/go-kit/log/level" influx "github.com/influxdata/influxdb/client/v2" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/prometheus/prometheus/prompb" ) // Client allows 
sending batches of Prometheus samples to InfluxDB. type Client struct { - logger log.Logger + logger *slog.Logger client influx.Client database string @@ -41,16 +41,16 @@ type Client struct { } // NewClient creates a new Client. -func NewClient(logger log.Logger, conf influx.HTTPConfig, db, rp string) *Client { +func NewClient(logger *slog.Logger, conf influx.HTTPConfig, db, rp string) *Client { c, err := influx.NewHTTPClient(conf) // Currently influx.NewClient() *should* never return an error. if err != nil { - level.Error(logger).Log("err", err) + logger.Error("Error creating influx HTTP client", "err", err) os.Exit(1) } if logger == nil { - logger = log.NewNopLogger() + logger = promslog.NewNopLogger() } return &Client{ @@ -84,7 +84,7 @@ func (c *Client) Write(samples model.Samples) error { for _, s := range samples { v := float64(s.Value) if math.IsNaN(v) || math.IsInf(v, 0) { - level.Debug(c.logger).Log("msg", "Cannot send to InfluxDB, skipping sample", "value", v, "sample", s) + c.logger.Debug("Cannot send to InfluxDB, skipping sample", "value", v, "sample", s) c.ignoredSamples.Inc() continue } diff --git a/documentation/examples/remote_storage/remote_storage_adapter/main.go b/documentation/examples/remote_storage/remote_storage_adapter/main.go index bb348aba7f..7f62990d2e 100644 --- a/documentation/examples/remote_storage/remote_storage_adapter/main.go +++ b/documentation/examples/remote_storage/remote_storage_adapter/main.go @@ -17,6 +17,7 @@ package main import ( "fmt" "io" + "log/slog" "net/http" _ "net/http/pprof" "net/url" @@ -26,16 +27,14 @@ import ( "time" "github.com/alecthomas/kingpin/v2" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/gogo/protobuf/proto" "github.com/golang/snappy" influx "github.com/influxdata/influxdb/client/v2" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" "github.com/prometheus/common/model" - "github.com/prometheus/common/promlog" - 
"github.com/prometheus/common/promlog/flag" + "github.com/prometheus/common/promslog" + "github.com/prometheus/common/promslog/flag" "github.com/prometheus/prometheus/documentation/examples/remote_storage/remote_storage_adapter/graphite" "github.com/prometheus/prometheus/documentation/examples/remote_storage/remote_storage_adapter/influxdb" @@ -57,7 +56,7 @@ type config struct { remoteTimeout time.Duration listenAddr string telemetryPath string - promlogConfig promlog.Config + promslogConfig promslog.Config } var ( @@ -105,11 +104,11 @@ func main() { cfg := parseFlags() http.Handle(cfg.telemetryPath, promhttp.Handler()) - logger := promlog.New(&cfg.promlogConfig) + logger := promslog.New(&cfg.promslogConfig) writers, readers := buildClients(logger, cfg) if err := serve(logger, cfg.listenAddr, writers, readers); err != nil { - level.Error(logger).Log("msg", "Failed to listen", "addr", cfg.listenAddr, "err", err) + logger.Error("Failed to listen", "addr", cfg.listenAddr, "err", err) os.Exit(1) } } @@ -120,7 +119,7 @@ func parseFlags() *config { cfg := &config{ influxdbPassword: os.Getenv("INFLUXDB_PW"), - promlogConfig: promlog.Config{}, + promslogConfig: promslog.Config{}, } a.Flag("graphite-address", "The host:port of the Graphite server to send samples to. None, if empty."). @@ -146,7 +145,7 @@ func parseFlags() *config { a.Flag("web.telemetry-path", "Address to listen on for web endpoints."). 
Default("/metrics").StringVar(&cfg.telemetryPath) - flag.AddFlags(a, &cfg.promlogConfig) + flag.AddFlags(a, &cfg.promslogConfig) _, err := a.Parse(os.Args[1:]) if err != nil { @@ -168,19 +167,19 @@ type reader interface { Name() string } -func buildClients(logger log.Logger, cfg *config) ([]writer, []reader) { +func buildClients(logger *slog.Logger, cfg *config) ([]writer, []reader) { var writers []writer var readers []reader if cfg.graphiteAddress != "" { c := graphite.NewClient( - log.With(logger, "storage", "Graphite"), + logger.With("storage", "Graphite"), cfg.graphiteAddress, cfg.graphiteTransport, cfg.remoteTimeout, cfg.graphitePrefix) writers = append(writers, c) } if cfg.opentsdbURL != "" { c := opentsdb.NewClient( - log.With(logger, "storage", "OpenTSDB"), + logger.With("storage", "OpenTSDB"), cfg.opentsdbURL, cfg.remoteTimeout, ) @@ -189,7 +188,7 @@ func buildClients(logger log.Logger, cfg *config) ([]writer, []reader) { if cfg.influxdbURL != "" { url, err := url.Parse(cfg.influxdbURL) if err != nil { - level.Error(logger).Log("msg", "Failed to parse InfluxDB URL", "url", cfg.influxdbURL, "err", err) + logger.Error("Failed to parse InfluxDB URL", "url", cfg.influxdbURL, "err", err) os.Exit(1) } conf := influx.HTTPConfig{ @@ -199,7 +198,7 @@ func buildClients(logger log.Logger, cfg *config) ([]writer, []reader) { Timeout: cfg.remoteTimeout, } c := influxdb.NewClient( - log.With(logger, "storage", "InfluxDB"), + logger.With("storage", "InfluxDB"), conf, cfg.influxdbDatabase, cfg.influxdbRetentionPolicy, @@ -208,15 +207,15 @@ func buildClients(logger log.Logger, cfg *config) ([]writer, []reader) { writers = append(writers, c) readers = append(readers, c) } - level.Info(logger).Log("msg", "Starting up...") + logger.Info("Starting up...") return writers, readers } -func serve(logger log.Logger, addr string, writers []writer, readers []reader) error { +func serve(logger *slog.Logger, addr string, writers []writer, readers []reader) error { 
http.HandleFunc("/write", func(w http.ResponseWriter, r *http.Request) { req, err := remote.DecodeWriteRequest(r.Body) if err != nil { - level.Error(logger).Log("msg", "Read error", "err", err.Error()) + logger.Error("Read error", "err", err.Error()) http.Error(w, err.Error(), http.StatusInternalServerError) return } @@ -238,21 +237,21 @@ func serve(logger log.Logger, addr string, writers []writer, readers []reader) e http.HandleFunc("/read", func(w http.ResponseWriter, r *http.Request) { compressed, err := io.ReadAll(r.Body) if err != nil { - level.Error(logger).Log("msg", "Read error", "err", err.Error()) + logger.Error("Read error", "err", err.Error()) http.Error(w, err.Error(), http.StatusInternalServerError) return } reqBuf, err := snappy.Decode(nil, compressed) if err != nil { - level.Error(logger).Log("msg", "Decode error", "err", err.Error()) + logger.Error("Decode error", "err", err.Error()) http.Error(w, err.Error(), http.StatusBadRequest) return } var req prompb.ReadRequest if err := proto.Unmarshal(reqBuf, &req); err != nil { - level.Error(logger).Log("msg", "Unmarshal error", "err", err.Error()) + logger.Error("Unmarshal error", "err", err.Error()) http.Error(w, err.Error(), http.StatusBadRequest) return } @@ -267,7 +266,7 @@ func serve(logger log.Logger, addr string, writers []writer, readers []reader) e var resp *prompb.ReadResponse resp, err = reader.Read(&req) if err != nil { - level.Warn(logger).Log("msg", "Error executing query", "query", req, "storage", reader.Name(), "err", err) + logger.Warn("Error executing query", "query", req, "storage", reader.Name(), "err", err) http.Error(w, err.Error(), http.StatusInternalServerError) return } @@ -283,7 +282,7 @@ func serve(logger log.Logger, addr string, writers []writer, readers []reader) e compressed = snappy.Encode(nil, data) if _, err := w.Write(compressed); err != nil { - level.Warn(logger).Log("msg", "Error writing response", "storage", reader.Name(), "err", err) + logger.Warn("Error writing 
response", "storage", reader.Name(), "err", err) } }) @@ -309,12 +308,12 @@ func protoToSamples(req *prompb.WriteRequest) model.Samples { return samples } -func sendSamples(logger log.Logger, w writer, samples model.Samples) { +func sendSamples(logger *slog.Logger, w writer, samples model.Samples) { begin := time.Now() err := w.Write(samples) duration := time.Since(begin).Seconds() if err != nil { - level.Warn(logger).Log("msg", "Error sending samples to remote storage", "err", err, "storage", w.Name(), "num_samples", len(samples)) + logger.Warn("Error sending samples to remote storage", "err", err, "storage", w.Name(), "num_samples", len(samples)) failedSamples.WithLabelValues(w.Name()).Add(float64(len(samples))) } sentSamples.WithLabelValues(w.Name()).Add(float64(len(samples))) diff --git a/documentation/examples/remote_storage/remote_storage_adapter/opentsdb/client.go b/documentation/examples/remote_storage/remote_storage_adapter/opentsdb/client.go index abb1d0b7d3..433c70527a 100644 --- a/documentation/examples/remote_storage/remote_storage_adapter/opentsdb/client.go +++ b/documentation/examples/remote_storage/remote_storage_adapter/opentsdb/client.go @@ -19,13 +19,12 @@ import ( "encoding/json" "fmt" "io" + "log/slog" "math" "net/http" "net/url" "time" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/prometheus/common/model" ) @@ -36,14 +35,14 @@ const ( // Client allows sending batches of Prometheus samples to OpenTSDB. type Client struct { - logger log.Logger + logger *slog.Logger url string timeout time.Duration } // NewClient creates a new Client. 
-func NewClient(logger log.Logger, url string, timeout time.Duration) *Client { +func NewClient(logger *slog.Logger, url string, timeout time.Duration) *Client { return &Client{ logger: logger, url: url, @@ -78,7 +77,7 @@ func (c *Client) Write(samples model.Samples) error { for _, s := range samples { v := float64(s.Value) if math.IsNaN(v) || math.IsInf(v, 0) { - level.Debug(c.logger).Log("msg", "Cannot send value to OpenTSDB, skipping sample", "value", v, "sample", s) + c.logger.Debug("Cannot send value to OpenTSDB, skipping sample", "value", v, "sample", s) continue } metric := TagValue(s.Metric[model.MetricNameLabel]) diff --git a/go.mod b/go.mod index d3c4d290d4..f41b063242 100644 --- a/go.mod +++ b/go.mod @@ -24,8 +24,6 @@ require ( github.com/envoyproxy/protoc-gen-validate v1.1.0 github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb github.com/fsnotify/fsnotify v1.7.0 - github.com/go-kit/log v0.2.1 - github.com/go-logfmt/logfmt v0.6.0 github.com/go-openapi/strfmt v0.23.0 github.com/go-zookeeper/zk v1.0.4 github.com/gogo/protobuf v1.3.2 @@ -54,10 +52,10 @@ require ( github.com/prometheus/alertmanager v0.27.0 github.com/prometheus/client_golang v1.20.4 github.com/prometheus/client_model v0.6.1 - github.com/prometheus/common v0.59.1 + github.com/prometheus/common v0.60.0 github.com/prometheus/common/assets v0.2.0 github.com/prometheus/common/sigv4 v0.1.0 - github.com/prometheus/exporter-toolkit v0.12.0 + github.com/prometheus/exporter-toolkit v0.13.0 github.com/scaleway/scaleway-sdk-go v1.0.0-beta.30 github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c github.com/stretchr/testify v1.9.0 @@ -79,7 +77,6 @@ require ( golang.org/x/sync v0.8.0 golang.org/x/sys v0.26.0 golang.org/x/text v0.19.0 - golang.org/x/time v0.6.0 golang.org/x/tools v0.26.0 google.golang.org/api v0.199.0 google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 @@ -119,7 +116,6 @@ require ( github.com/felixge/httpsnoop v1.0.4 // indirect 
github.com/fxamacker/cbor/v2 v2.7.0 // indirect github.com/ghodss/yaml v1.0.0 // indirect - github.com/go-kit/kit v0.12.0 // indirect github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-openapi/analysis v0.22.2 // indirect @@ -195,6 +191,7 @@ require ( golang.org/x/mod v0.21.0 // indirect golang.org/x/net v0.30.0 // indirect golang.org/x/term v0.25.0 // indirect + golang.org/x/time v0.6.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect @@ -207,11 +204,6 @@ require ( sigs.k8s.io/yaml v1.4.0 // indirect ) -replace ( - k8s.io/klog => github.com/simonpasquier/klog-gokit v0.3.0 - k8s.io/klog/v2 => github.com/simonpasquier/klog-gokit/v3 v3.5.0 -) - // Exclude linodego v1.0.0 as it is no longer published on github. exclude github.com/linode/linodego v1.0.0 diff --git a/go.sum b/go.sum index 1a0db294c0..408419ad8c 100644 --- a/go.sum +++ b/go.sum @@ -61,13 +61,8 @@ github.com/Code-Hex/go-generics-cache v1.5.1/go.mod h1:qxcC9kRVrct9rHeiYpFWSoW1v github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/KimMachineGun/automemlimit v0.6.1 h1:ILa9j1onAAMadBsyyUJv5cack8Y1WT26yLj/V+ulKp8= github.com/KimMachineGun/automemlimit v0.6.1/go.mod h1:T7xYht7B8r6AG/AqFcUdc7fzd2bIdBKmepfP2S1svPY= -github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= -github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= -github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= -github.com/VividCortex/gohistogram v1.0.0/go.mod 
h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= -github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= github.com/alecthomas/kingpin/v2 v2.4.0 h1:f48lwail6p8zpO1bC4TxtqACaGqHYA22qkHjHpqDjYY= github.com/alecthomas/kingpin/v2 v2.4.0/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= @@ -78,23 +73,17 @@ github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk5 github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30 h1:t3eaIm0rUkzbrIewtiFmMK5RXHej2XnoXNhxVsAYUfg= github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30/go.mod h1:fvzegU4vN3H1qMT+8wDmzjAcDONcgo2/SZ/TyfdUOFs= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= -github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA= github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so= github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod 
h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= -github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= -github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/aws/aws-sdk-go v1.55.5 h1:KKUZBfBoyqy5d3swXyiC7Q76ic40rYcbqH7qjh59kzU= github.com/aws/aws-sdk-go v1.55.5/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= -github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 h1:6df1vn4bBlDDo4tARvBm7l6KA9iVMnE3NWizDeWSrps= github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3/go.mod h1:CIWtjkly68+yqLPbvwwR/fjNJA/idrtULjZWh2v1ys0= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= @@ -102,8 +91,6 @@ github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+Ce github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= -github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= @@ -117,24 +104,16 @@ github.com/cilium/ebpf v0.11.0 h1:V8gS/bTCCjX9uUnkUFUpPsksM8n1lXBAvHcpiFk1X2Y= github.com/cilium/ebpf v0.11.0/go.mod h1:WE7CZAnqOL2RouJ4f1uyNhqr2P4CCvXFIqdRDUgWsVs= 
github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= -github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/xds/go v0.0.0-20240723142845-024c85f92f20 h1:N+3sFI5GUjRKBi+i0TxYVST9h4Ie192jJWpHvthBBgg= github.com/cncf/xds/go v0.0.0-20240723142845-024c85f92f20/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= -github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= -github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= github.com/containerd/cgroups/v3 v3.0.3 h1:S5ByHZ/h9PMe5IOQoN7E+nMc2UcLEM/V48DGDJ9kip0= github.com/containerd/cgroups/v3 v3.0.3/go.mod h1:8HBe7V3aWGLFPd/k03swSIsGjZhHI2WzJmticMgVuz0= github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= -github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= -github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= 
-github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -142,7 +121,6 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE= github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA= -github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/digitalocean/godo v1.122.0 h1:ziytLQi8QKtDp2K1A+YrYl2dWLHLh2uaMzWvcz9HkKg= github.com/digitalocean/godo v1.122.0/go.mod h1:WQVH83OHUy6gC4gXpEVQKtxTd4L5oCp+5OialidkPLY= github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= @@ -155,16 +133,10 @@ github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKoh github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= -github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= -github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= -github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= 
github.com/edsrzf/mmap-go v1.1.0 h1:6EUwBLQ/Mcr1EYLE4Tn1VdW1A4ckqCQWZBw8Hr0kjpQ= github.com/edsrzf/mmap-go v1.1.0/go.mod h1:19H/e8pUPLicwkyNgOykDXkJ9F0MHE+Z52B8EIth78Q= github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= -github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= @@ -182,11 +154,8 @@ github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= -github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= github.com/frankban/quicktest v1.14.5 h1:dfYrrRyLtiqT9GyKXgdh+k4inNeTvmGbuSgZ3lx3GhA= github.com/frankban/quicktest v1.14.5/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= -github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= @@ -198,17 +167,11 @@ github.com/go-gl/glfw/v3.3/glfw 
v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2 github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= -github.com/go-kit/kit v0.12.0 h1:e4o3o3IsBfAKQh5Qbbiqyfu97Ku7jrO/JbohvztANh4= -github.com/go-kit/kit v0.12.0/go.mod h1:lHd+EkCZPIwYItmGDDRdhinkzX2A1sj+M9biaEaizzs= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= -github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU= -github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= -github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4= -github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= +github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= @@ -234,7 +197,6 @@ github.com/go-openapi/validate v0.23.0 h1:2l7PJLzCis4YUGEoW6eoQw3WhyM65WSIcjX6SQ github.com/go-openapi/validate v0.23.0/go.mod h1:EeiAZ5bmpSIOJV1WLfyYF9qp/B1ZgSaEpHTJHtN5cbE= github.com/go-resty/resty/v2 v2.13.1 h1:x+LHXBI2nMB1vqndymf26quycC4aggYJ7DECYbiz03g= github.com/go-resty/resty/v2 v2.13.1/go.mod h1:GznXlLxkq6Nh4sU59rPmUw3VtgpO3aS96ORAI6Q7d+0= 
-github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= @@ -242,10 +204,7 @@ github.com/go-zookeeper/zk v1.0.4 h1:DPzxraQx7OrPyXq2phlGlNSIyWEsAox0RJmjTseMV6I github.com/go-zookeeper/zk v1.0.4/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw= github.com/godbus/dbus/v5 v5.0.4 h1:9349emZab16e7zQvpmsbtjc18ykshndd8y2PG3sgJbA= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk= @@ -253,7 +212,6 @@ github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVI github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/glog v1.2.2 h1:1+mZ9upx1Dh6FmUTFR1naJ77miKiXgALjWOZ3NVFPmY= github.com/golang/glog v1.2.2/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= -github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache 
v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -282,7 +240,6 @@ github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= -github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= @@ -324,7 +281,6 @@ github.com/google/pprof v0.0.0-20240711041743-f6c9dda6c6da/go.mod h1:K1liHPHnj73 github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/s2a-go v0.1.8 h1:zZDs9gcbt9ZPLV0ndSyQk6Kacx2g/X+SKYovpnz3SMM= github.com/google/s2a-go v0.1.8/go.mod h1:6iNWHTpQ+nfNRN5E00MSdfDwVesa8hhS32PhPO8deJA= -github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -336,28 +292,18 @@ github.com/googleapis/gax-go/v2 v2.13.0 h1:yitjD5f7jQHhyDsnhKEBU52NdvvdSeGzlAnDP github.com/googleapis/gax-go/v2 v2.13.0/go.mod h1:Z/fvTZXF8/uw7Xu5GuslPw+bplx6SS338j1Is2S+B7A= github.com/gophercloud/gophercloud v1.14.1 h1:DTCNaTVGl8/cFu58O1JwWgis9gtISAFONqpMKNg/Vpw= github.com/gophercloud/gophercloud v1.14.1/go.mod 
h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM= -github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= -github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc h1:GN2Lv3MGO7AS6PrRoT6yV5+wkrOpcszoIsO4+4ds248= github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc/go.mod h1:+JKpmjMGhpgPL+rXZ5nsZieVzvarn86asRlBg4uNGnk= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 h1:asbCHRVmodnJTuQ3qamDwqVOIjwqUPTYmYuemVOx+Ys= github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0/go.mod h1:ggCgvZ2r7uOoQjOyu2Y1NhHmEPPzzuhWgcza5M1Ji1I= -github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= github.com/hashicorp/consul/api v1.29.4 h1:P6slzxDLBOxUSj3fWo2o65VuKtbtOXFi7TSSgtXutuE= github.com/hashicorp/consul/api v1.29.4/go.mod h1:HUlfw+l2Zy68ceJavv2zAyArl2fqhGWnMycyt56sBgg= 
github.com/hashicorp/consul/proto-public v0.6.2 h1:+DA/3g/IiKlJZb88NBn0ZgXrxJp2NlvCZdEyl+qxvL0= github.com/hashicorp/consul/proto-public v0.6.2/go.mod h1:cXXbOg74KBNGajC+o8RlA502Esf0R9prcoJgiOX/2Tg= -github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/consul/sdk v0.16.1 h1:V8TxTnImoPD5cj0U9Spl0TUxcytjcbbJeADFF07KdHg= github.com/hashicorp/consul/sdk v0.16.1/go.mod h1:fSXvwxB2hmh1FMZCNl6PwX0Q/1wdWtHJcZ7Ea5tns0s= github.com/hashicorp/cronexpr v1.1.2 h1:wG/ZYIKT+RT3QkOdgYc+xsKWVRgnxJ1OJtjjy84fJ9A= @@ -366,7 +312,6 @@ github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brv github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k= @@ -384,7 +329,6 @@ github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9 github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= github.com/hashicorp/go-retryablehttp v0.7.7 h1:C8hUCYzor8PIfXHa4UrZkU4VvK8o9ISHxT2Q8+VepXU= github.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk= -github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= 
github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= @@ -395,51 +339,38 @@ github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/b github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY= github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.6.0 h1:uL2shRDx7RTrOrTCUZEGP/wJUFiUI8QT6E7z5o8jga4= github.com/hashicorp/golang-lru v0.6.0/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= -github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc= -github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= github.com/hashicorp/memberlist v0.5.0 h1:EtYPN8DpAURiapus508I4n9CzHs2W+8NZGbmmR/prTM= github.com/hashicorp/memberlist v0.5.0/go.mod h1:yvyXLpo0QaGE59Y7hDTsTzDD25JYBZ4mHgHUZ8lrOI0= github.com/hashicorp/nomad/api v0.0.0-20240717122358-3d93bd3778f3 h1:fgVfQ4AC1avVOnu2cfms8VAiD8lUq3vWI8mTocOXN/w= github.com/hashicorp/nomad/api v0.0.0-20240717122358-3d93bd3778f3/go.mod h1:svtxn6QnrQ69P23VvIWMR34tg3vmwLz4UdUzm1dSCgE= 
-github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY= github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4= github.com/hetznercloud/hcloud-go/v2 v2.13.1 h1:jq0GP4QaYE5d8xR/Zw17s9qoaESRJMXfGmtD1a/qckQ= github.com/hetznercloud/hcloud-go/v2 v2.13.1/go.mod h1:dhix40Br3fDiBhwaSG/zgaYOFFddpfBm/6R1Zz0IiF0= -github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= -github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= github.com/ionos-cloud/sdk-go/v6 v6.2.1 h1:mxxN+frNVmbFrmmFfXnBC3g2USYJrl6mc1LW2iNYbFY= github.com/ionos-cloud/sdk-go/v6 v6.2.1/go.mod h1:SXrO9OGyWjd2rZhAhEpdYN6VUAODzzqRdqA9BCviQtI= github.com/jarcoal/httpmock v1.3.1 h1:iUx3whfZWVf3jT01hQTO/Eo5sAYtB2/rqaUuOtpInww= github.com/jarcoal/httpmock v1.3.1/go.mod h1:3yb8rc4BI7TCBhFY8ng0gjuLKJNquuDNiPaZjnENuYg= -github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod 
h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= -github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= @@ -447,11 +378,9 @@ github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnr github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= -github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= -github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.5.0/go.mod 
h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= @@ -470,11 +399,8 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= -github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= -github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= github.com/linode/linodego v1.41.0 h1:GcP7JIBr9iLRJ9FwAtb9/WCT1DuPJS/xUApapfdjtiY= github.com/linode/linodego v1.41.0/go.mod h1:Ow4/XZ0yvWBzt3iAHwchvhSx30AyLintsSMvvQ2/SJY= -github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= @@ -485,7 +411,6 @@ github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.11/go.mod 
h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= @@ -493,7 +418,6 @@ github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27k github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= -github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/maxatome/go-testdeep v1.12.0 h1:Ql7Go8Tg0C1D/uMMX59LAoYK7LffeJQ6X2T04nTH68g= github.com/maxatome/go-testdeep v1.12.0/go.mod h1:lPZc/HAcJMP92l7yI6TRz1aZN5URwUBUAfUNvrclaNM= @@ -501,23 +425,16 @@ github.com/mdlayher/socket v0.4.1 h1:eM9y2/jlbs1M615oshPQOHZzj6R6wMT7bX5NPiQvn2U github.com/mdlayher/socket v0.4.1/go.mod h1:cAqeGjoufqdxWkD7DkpyS+wcefOtmu5OQ8KuoJGIReA= github.com/mdlayher/vsock v1.2.1 h1:pC1mTJTvjo1r9n9fbm7S1j04rCgCzhCOS5DY0zqHlnQ= github.com/mdlayher/vsock v1.2.1/go.mod h1:NRfCibel++DgeMD8z/hP+PPTjlNJsdPOmxcnENvE+SE= -github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= github.com/miekg/dns v1.1.62 h1:cN8OuEF1/x5Rq6Np+h1epln8OiyPWV+lROx9LxcGgIQ= github.com/miekg/dns v1.1.62/go.mod h1:mvDlcItzm+br7MToIKqkglaGhlFMHJ9DTNNWONWXbNQ= -github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= -github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-homedir v1.1.0 
h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= github.com/mitchellh/go-testing-interface v1.14.1 h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU= github.com/mitchellh/go-testing-interface v1.14.1/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8= -github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= -github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= @@ -538,64 +455,35 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8m github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= -github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU= -github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k= -github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w= -github.com/nats-io/nkeys v0.1.0/go.mod 
h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= -github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= -github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= github.com/nsf/jsondiff v0.0.0-20230430225905-43f6cf3098c1 h1:dOYG7LS/WK00RWZc8XGgcUTlTxpp3mKhdR2Q9z9HbXM= github.com/nsf/jsondiff v0.0.0-20230430225905-43f6cf3098c1/go.mod h1:mpRZBD8SJ55OIICQ3iWH0Yz3cjzA61JdqMLoWXeB2+8= -github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= -github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= github.com/oklog/run v1.1.0 h1:GEenZ1cK0+q0+wsJew9qUg/DyD8k3JzYsZAi5gYi2mA= github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU= github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= -github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= -github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.7.0 h1:WSHQ+IS43OoUrWtD1/bbclrwK8TTH5hzp+umCiuxHgs= -github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo/v2 v2.19.0 h1:9Cnnf7UHo57Hy3k6/m5k3dRfGTMXGvxhHFvkDTCTpvA= github.com/onsi/ginkgo/v2 v2.19.0/go.mod h1:rlwLi9PilAFJ8jCg9UE1QP6VBpd6/xj3SRC0d6TU0To= -github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.19.0 h1:4ieX6qQjPP/BfC3mpsAtIGGlxTWPeA3Inl/7DtXw1tw= github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= -github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod 
h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM= github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/runtime-spec v1.0.2 h1:UfAcuLBJB9Coz72x1hgl8O5RVzTdNiaglX6v2DM6FI0= github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= -github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= -github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxSfWAKL3wpBW7V8scJMt8N8gnaMCS9E/cA= -github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= -github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= -github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= github.com/ovh/go-ovh v1.6.0 h1:ixLOwxQdzYDx296sXcgS35TOPEahJkpjMGtzPadCjQI= github.com/ovh/go-ovh v1.6.0/go.mod h1:cTVDnl94z4tl8pP1uZ/8jlVxntjSIf09bNcQ5TJSC7c= -github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0= github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod 
h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y= -github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= -github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= -github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= -github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo= github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= @@ -608,54 +496,43 @@ github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P github.com/prometheus/alertmanager v0.27.0 h1:V6nTa2J5V4s8TG4C4HtrBP/WNSebCCTYGGv4qecA/+I= github.com/prometheus/alertmanager v0.27.0/go.mod h1:8Ia/R3urPmbzJ8OsdvmZvIprDwvwmYCmUbwBL+jlPOE= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= github.com/prometheus/client_golang 
v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_golang v1.20.4 h1:Tgh3Yr67PaOv/uTqloMsCEdeuFTatm5zIq5+qNN23vI= github.com/prometheus/client_golang v1.20.4/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= -github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= 
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.59.1 h1:LXb1quJHWm1P6wq/U824uxYi4Sg0oGvNeUm1z5dJoX0= -github.com/prometheus/common v0.59.1/go.mod h1:GpWM7dewqmVYcd7SmRaiWVe9SSqjf0UrwnYnpEZNuT0= +github.com/prometheus/common v0.60.0 h1:+V9PAREWNvJMAuJ1x1BaWl9dewMW4YrHZQbx0sJNllA= +github.com/prometheus/common v0.60.0/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw= github.com/prometheus/common/assets v0.2.0 h1:0P5OrzoHrYBOSM1OigWL3mY8ZvV2N4zIE/5AahrSrfM= github.com/prometheus/common/assets v0.2.0/go.mod h1:D17UVUE12bHbim7HzwUvtqm6gwBEaDQ0F+hIGbFbccI= github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4= github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI= -github.com/prometheus/exporter-toolkit v0.12.0 h1:DkE5RcEZR3lQA2QD5JLVQIf41dFKNsVMXFhgqcif7fo= -github.com/prometheus/exporter-toolkit v0.12.0/go.mod h1:fQH0KtTn0yrrS0S82kqppRjDDiwMfIQUwT+RBRRhwUc= +github.com/prometheus/exporter-toolkit v0.13.0 h1:lmA0Q+8IaXgmFRKw09RldZmZdnvu9wwcDLIXGmTPw1c= +github.com/prometheus/exporter-toolkit v0.13.0/go.mod h1:2uop99EZl80KdXhv/MxVI2181fMcwlsumFOqBecGkG0= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.15.1 
h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= -github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= -github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= -github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= github.com/scaleway/scaleway-sdk-go v1.0.0-beta.30 h1:yoKAVkEVwAqbGbR8n87rHQ1dulL25rKloGadb3vm770= github.com/scaleway/scaleway-sdk-go v1.0.0-beta.30/go.mod h1:sH0u6fq6x4R5M7WxkoQFY/o7UaiItec0o1LinLCJNq8= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= @@ -664,28 +541,14 @@ github.com/shoenig/test v1.7.1 h1:UJcjSAI3aUKx52kfcfhblgyhZceouhvvs3OYdWgn+PY= github.com/shoenig/test v1.7.1/go.mod h1:UxJ6u/x2v/TNs/LoLxBNJRV9DiwBBKYxXSyczsBHFoI= github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c h1:aqg5Vm5dwtvL+YgDpBcK1ITf3o96N/K7/wsRXQnUTEs= github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c/go.mod h1:owqhoLW1qZoYLZzLnBw+QkPP9WZnjlSWihhxAJC1+/M= -github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= -github.com/simonpasquier/klog-gokit v0.3.0 
h1:TkFK21cbwDRS+CiystjqbAiq5ubJcVTk9hLUck5Ntcs= -github.com/simonpasquier/klog-gokit v0.3.0/go.mod h1:+SUlDQNrhVtGt2FieaqNftzzk8P72zpWlACateWxA9k= -github.com/simonpasquier/klog-gokit/v3 v3.5.0 h1:ewnk+ickph0hkQFgdI4pffKIbruAxxWcg0Fe/vQmLOM= -github.com/simonpasquier/klog-gokit/v3 v3.5.0/go.mod h1:S9flvRzzpaYLYtXI2w8jf9R/IU/Cy14NrbvDUevNP1E= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= -github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= -github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= -github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= -github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= -github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= -github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= github.com/stretchr/objx v0.1.0/go.mod 
h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= @@ -703,28 +566,20 @@ github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= -github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= -github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/vultr/govultr/v2 v2.17.2 h1:gej/rwr91Puc/tgh+j33p/BLR16UrIPnSr+AIwYWZQs= github.com/vultr/govultr/v2 v2.17.2/go.mod h1:ZFOKGWmgjytfyjeyAdhQlSWwTjh2ig+X49cAp50dzXI= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/xhit/go-str2duration/v2 v2.1.0 h1:lxklc02Drh6ynqX+DdPyp5pCKLUQpRT8bp8Ydu2Bstc= github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU= -github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= 
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= go.mongodb.org/mongo-driver v1.14.0 h1:P98w8egYRjYe3XDjxhYJagTokP/H6HzlsnojRgZRd80= go.mongodb.org/mongo-driver v1.14.0/go.mod h1:Vzb0Mk/pa7e6cWw85R4F/endUC3u0U9jGcNU603k65c= -go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= -go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= @@ -754,27 +609,18 @@ go.opentelemetry.io/otel/trace v1.30.0 h1:7UBkkYzeg3C7kQX8VAidWh2biiQbtAKjyIML8d go.opentelemetry.io/otel/trace v1.30.0/go.mod h1:5EyKqTzzmyqB9bwtCCq6pDLktPK6fmGf/Dph+8VI02o= go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= -go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs= go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= -go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/multierr v1.3.0/go.mod 
h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= -go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= -go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -822,13 +668,8 @@ golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0= golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net 
v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -839,7 +680,6 @@ golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -893,11 +733,7 @@ golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -908,13 +744,11 @@ golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -978,22 +812,18 @@ golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= -golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U= golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= -golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools 
v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= @@ -1005,8 +835,6 @@ golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -1014,7 +842,6 @@ golang.org/x/tools 
v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= @@ -1041,7 +868,6 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= @@ -1059,7 +885,6 @@ google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0M google.golang.org/api v0.199.0 h1:aWUXClp+VFJmqE0JPvpZOK3LDQMyFKYIow4etYd9qxs= google.golang.org/api v0.199.0/go.mod h1:ohG4qSztDJmZdjK/Ar6MhbAmb/Rpi4JHOqagsh90K28= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.2.0/go.mod 
h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= @@ -1070,7 +895,6 @@ google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRn google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= @@ -1098,15 +922,10 @@ google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 h1: google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:qpvKtACPCQhAdu3PyQgV4l3LMXZEtft7y8QcarRsp9I= google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 h1:pPJltXNxVzT4pK9yD8vR9X75DaWYYmLGMsEvBfFQZzQ= google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= -google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= 
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= @@ -1137,20 +956,13 @@ gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4= gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= -gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= gopkg.in/ini.v1 v1.67.0/go.mod 
h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= -gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -1166,7 +978,6 @@ gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0= gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= -honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -1180,6 +991,10 @@ k8s.io/apimachinery v0.31.0 h1:m9jOiSr3FoSSL5WO9bjm1n6B9KROYYgNZOb4tyZ1lBc= k8s.io/apimachinery v0.31.0/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo= k8s.io/client-go v0.31.0 h1:QqEJzNjbN2Yv1H79SsS+SWnXkBgVu4Pj3CJQgbx0gI8= k8s.io/client-go v0.31.0/go.mod h1:Y9wvC76g4fLjmU0BA+rV+h2cncoadjvjjkkIGoTLcGU= +k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= +k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= +k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= +k8s.io/klog/v2 
v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag= k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98= k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A= @@ -1191,7 +1006,5 @@ sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMm sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= -sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= -sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU= diff --git a/notifier/notifier.go b/notifier/notifier.go index 5374e73d62..b7f719ea58 100644 --- a/notifier/notifier.go +++ b/notifier/notifier.go @@ -19,19 +19,19 @@ import ( "encoding/json" "fmt" "io" + "log/slog" "net/http" "net/url" "path" "sync" "time" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/go-openapi/strfmt" "github.com/prometheus/alertmanager/api/v2/models" "github.com/prometheus/client_golang/prometheus" config_util "github.com/prometheus/common/config" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/prometheus/common/sigv4" "github.com/prometheus/common/version" "go.uber.org/atomic" @@ -117,7 +117,7 @@ type Manager struct { stopRequested chan struct{} alertmanagers map[string]*alertmanagerSet - logger log.Logger + logger *slog.Logger } // Options are the configurable 
parameters of a Handler. @@ -218,12 +218,12 @@ func do(ctx context.Context, client *http.Client, req *http.Request) (*http.Resp } // NewManager is the manager constructor. -func NewManager(o *Options, logger log.Logger) *Manager { +func NewManager(o *Options, logger *slog.Logger) *Manager { if o.Do == nil { o.Do = do } if logger == nil { - logger = log.NewNopLogger() + logger = promslog.NewNopLogger() } n := &Manager{ @@ -319,7 +319,7 @@ func (n *Manager) Run(tsets <-chan map[string][]*targetgroup.Group) { }() wg.Wait() - level.Info(n.logger).Log("msg", "Notification manager stopped") + n.logger.Info("Notification manager stopped") } // sendLoop continuously consumes the notifications queue and sends alerts to @@ -376,20 +376,20 @@ func (n *Manager) sendOneBatch() { func (n *Manager) drainQueue() { if !n.opts.DrainOnShutdown { if n.queueLen() > 0 { - level.Warn(n.logger).Log("msg", "Draining remaining notifications on shutdown is disabled, and some notifications have been dropped", "count", n.queueLen()) + n.logger.Warn("Draining remaining notifications on shutdown is disabled, and some notifications have been dropped", "count", n.queueLen()) n.metrics.dropped.Add(float64(n.queueLen())) } return } - level.Info(n.logger).Log("msg", "Draining any remaining notifications...") + n.logger.Info("Draining any remaining notifications...") for n.queueLen() > 0 { n.sendOneBatch() } - level.Info(n.logger).Log("msg", "Remaining notifications drained") + n.logger.Info("Remaining notifications drained") } func (n *Manager) reload(tgs map[string][]*targetgroup.Group) { @@ -399,7 +399,7 @@ func (n *Manager) reload(tgs map[string][]*targetgroup.Group) { for id, tgroup := range tgs { am, ok := n.alertmanagers[id] if !ok { - level.Error(n.logger).Log("msg", "couldn't sync alert manager set", "err", fmt.Sprintf("invalid id:%v", id)) + n.logger.Error("couldn't sync alert manager set", "err", fmt.Sprintf("invalid id:%v", id)) continue } am.sync(tgroup) @@ -422,7 +422,7 @@ func (n 
*Manager) Send(alerts ...*Alert) { if d := len(alerts) - n.opts.QueueCapacity; d > 0 { alerts = alerts[d:] - level.Warn(n.logger).Log("msg", "Alert batch larger than queue capacity, dropping alerts", "num_dropped", d) + n.logger.Warn("Alert batch larger than queue capacity, dropping alerts", "num_dropped", d) n.metrics.dropped.Add(float64(d)) } @@ -431,7 +431,7 @@ func (n *Manager) Send(alerts ...*Alert) { if d := (len(n.queue) + len(alerts)) - n.opts.QueueCapacity; d > 0 { n.queue = n.queue[d:] - level.Warn(n.logger).Log("msg", "Alert notification queue full, dropping alerts", "num_dropped", d) + n.logger.Warn("Alert notification queue full, dropping alerts", "num_dropped", d) n.metrics.dropped.Add(float64(d)) } n.queue = append(n.queue, alerts...) @@ -562,7 +562,7 @@ func (n *Manager) sendAll(alerts ...*Alert) bool { if v1Payload == nil { v1Payload, err = json.Marshal(amAlerts) if err != nil { - level.Error(n.logger).Log("msg", "Encoding alerts for Alertmanager API v1 failed", "err", err) + n.logger.Error("Encoding alerts for Alertmanager API v1 failed", "err", err) ams.mtx.RUnlock() return false } @@ -577,7 +577,7 @@ func (n *Manager) sendAll(alerts ...*Alert) bool { v2Payload, err = json.Marshal(openAPIAlerts) if err != nil { - level.Error(n.logger).Log("msg", "Encoding alerts for Alertmanager API v2 failed", "err", err) + n.logger.Error("Encoding alerts for Alertmanager API v2 failed", "err", err) ams.mtx.RUnlock() return false } @@ -587,8 +587,8 @@ func (n *Manager) sendAll(alerts ...*Alert) bool { } default: { - level.Error(n.logger).Log( - "msg", fmt.Sprintf("Invalid Alertmanager API version '%v', expected one of '%v'", ams.cfg.APIVersion, config.SupportedAlertmanagerAPIVersions), + n.logger.Error( + fmt.Sprintf("Invalid Alertmanager API version '%v', expected one of '%v'", ams.cfg.APIVersion, config.SupportedAlertmanagerAPIVersions), "err", err, ) ams.mtx.RUnlock() @@ -609,7 +609,7 @@ func (n *Manager) sendAll(alerts ...*Alert) bool { go func(ctx 
context.Context, client *http.Client, url string, payload []byte, count int) { if err := n.sendOne(ctx, client, url, payload); err != nil { - level.Error(n.logger).Log("alertmanager", url, "count", count, "msg", "Error sending alert", "err", err) + n.logger.Error("Error sending alert", "alertmanager", url, "count", count, "err", err) n.metrics.errors.WithLabelValues(url).Inc() } else { numSuccess.Inc() @@ -689,7 +689,7 @@ func (n *Manager) sendOne(ctx context.Context, c *http.Client, url string, b []b // // Stop is safe to call multiple times. func (n *Manager) Stop() { - level.Info(n.logger).Log("msg", "Stopping notification manager...") + n.logger.Info("Stopping notification manager...") n.stopOnce.Do(func() { close(n.stopRequested) @@ -724,10 +724,10 @@ type alertmanagerSet struct { mtx sync.RWMutex ams []alertmanager droppedAms []alertmanager - logger log.Logger + logger *slog.Logger } -func newAlertmanagerSet(cfg *config.AlertmanagerConfig, logger log.Logger, metrics *alertMetrics) (*alertmanagerSet, error) { +func newAlertmanagerSet(cfg *config.AlertmanagerConfig, logger *slog.Logger, metrics *alertMetrics) (*alertmanagerSet, error) { client, err := config_util.NewClientFromConfig(cfg.HTTPClientConfig, "alertmanager") if err != nil { return nil, err @@ -761,7 +761,7 @@ func (s *alertmanagerSet) sync(tgs []*targetgroup.Group) { for _, tg := range tgs { ams, droppedAms, err := AlertmanagerFromGroup(tg, s.cfg) if err != nil { - level.Error(s.logger).Log("msg", "Creating discovered Alertmanagers failed", "err", err) + s.logger.Error("Creating discovered Alertmanagers failed", "err", err) continue } allAms = append(allAms, ams...) 
diff --git a/notifier/notifier_test.go b/notifier/notifier_test.go index 68dd445811..ac722eb23c 100644 --- a/notifier/notifier_test.go +++ b/notifier/notifier_test.go @@ -26,11 +26,11 @@ import ( "testing" "time" - "github.com/go-kit/log" "github.com/prometheus/alertmanager/api/v2/models" "github.com/prometheus/client_golang/prometheus" config_util "github.com/prometheus/common/config" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "go.uber.org/atomic" "gopkg.in/yaml.v2" @@ -751,7 +751,7 @@ func TestHangingNotifier(t *testing.T) { require.NoError(t, err) sdManager := discovery.NewManager( ctx, - log.NewNopLogger(), + promslog.NewNopLogger(), reg, sdMetrics, discovery.Name("sd-manager"), diff --git a/promql/engine.go b/promql/engine.go index e10be63783..e1beb2d910 100644 --- a/promql/engine.go +++ b/promql/engine.go @@ -20,6 +20,7 @@ import ( "errors" "fmt" "io" + "log/slog" "math" "reflect" "runtime" @@ -30,10 +31,9 @@ import ( "sync" "time" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" @@ -125,7 +125,11 @@ type QueryEngine interface { // QueryLogger is an interface that can be used to log all the queries logged // by the engine. type QueryLogger interface { - Log(...interface{}) error + Error(msg string, args ...any) + Info(msg string, args ...any) + Debug(msg string, args ...any) + Warn(msg string, args ...any) + With(args ...any) Close() error } @@ -288,7 +292,7 @@ type QueryTracker interface { // EngineOpts contains configuration options used when creating a new Engine. 
type EngineOpts struct { - Logger log.Logger + Logger *slog.Logger Reg prometheus.Registerer MaxSamples int Timeout time.Duration @@ -326,7 +330,7 @@ type EngineOpts struct { // Engine handles the lifetime of queries from beginning to end. // It is connected to a querier. type Engine struct { - logger log.Logger + logger *slog.Logger metrics *engineMetrics timeout time.Duration maxSamplesPerQuery int @@ -344,7 +348,7 @@ type Engine struct { // NewEngine returns a new engine. func NewEngine(opts EngineOpts) *Engine { if opts.Logger == nil { - opts.Logger = log.NewNopLogger() + opts.Logger = promslog.NewNopLogger() } queryResultSummary := prometheus.NewSummaryVec(prometheus.SummaryOpts{ @@ -403,7 +407,7 @@ func NewEngine(opts EngineOpts) *Engine { if opts.LookbackDelta == 0 { opts.LookbackDelta = defaultLookbackDelta if l := opts.Logger; l != nil { - level.Debug(l).Log("msg", "Lookback delta is zero, setting to default value", "value", defaultLookbackDelta) + l.Debug("Lookback delta is zero, setting to default value", "value", defaultLookbackDelta) } } @@ -455,7 +459,7 @@ func (ng *Engine) SetQueryLogger(l QueryLogger) { // not make reload fail; only log a warning. err := ng.queryLogger.Close() if err != nil { - level.Warn(ng.logger).Log("msg", "Error while closing the previous query log file", "err", err) + ng.logger.Warn("Error while closing the previous query log file", "err", err) } } @@ -632,23 +636,23 @@ func (ng *Engine) exec(ctx context.Context, q *query) (v parser.Value, ws annota // The step provided by the user is in seconds. 
params["step"] = int64(eq.Interval / (time.Second / time.Nanosecond)) } - f := []interface{}{"params", params} + l.With("params", params) if err != nil { - f = append(f, "error", err) + l.With("error", err) } - f = append(f, "stats", stats.NewQueryStats(q.Stats())) + l.With("stats", stats.NewQueryStats(q.Stats())) if span := trace.SpanFromContext(ctx); span != nil { - f = append(f, "spanID", span.SpanContext().SpanID()) + l.With("spanID", span.SpanContext().SpanID()) } if origin := ctx.Value(QueryOrigin{}); origin != nil { for k, v := range origin.(map[string]interface{}) { - f = append(f, k, v) + l.With(k, v) } } - if err := l.Log(f...); err != nil { - ng.metrics.queryLogFailures.Inc() - level.Error(ng.logger).Log("msg", "can't log query", "err", err) - } + l.Info("promql query logged") + // TODO: @tjhop -- do we still need this metric/error log if logger doesn't return errors? + // ng.metrics.queryLogFailures.Inc() + // ng.logger.Error("can't log query", "err", err) } ng.queryLoggerLock.RUnlock() }() @@ -1059,7 +1063,7 @@ type evaluator struct { maxSamples int currentSamples int - logger log.Logger + logger *slog.Logger lookbackDelta time.Duration samplesStats *stats.QuerySamples noStepSubqueryIntervalFn func(rangeMillis int64) int64 @@ -1089,7 +1093,7 @@ func (ev *evaluator) recover(expr parser.Expr, ws *annotations.Annotations, errp buf := make([]byte, 64<<10) buf = buf[:runtime.Stack(buf, false)] - level.Error(ev.logger).Log("msg", "runtime panic during query evaluation", "expr", expr.String(), "err", e, "stacktrace", string(buf)) + ev.logger.Error("runtime panic during query evaluation", "expr", expr.String(), "err", e, "stacktrace", string(buf)) *errp = fmt.Errorf("unexpected error: %w", err) case errWithWarnings: *errp = err.err diff --git a/promql/engine_internal_test.go b/promql/engine_internal_test.go index cb501b2fdf..0962c218c7 100644 --- a/promql/engine_internal_test.go +++ b/promql/engine_internal_test.go @@ -14,22 +14,21 @@ package promql import ( + 
"bytes" "errors" "testing" - "github.com/go-kit/log" "github.com/stretchr/testify/require" + "github.com/prometheus/common/promslog" + "github.com/prometheus/prometheus/promql/parser" "github.com/prometheus/prometheus/util/annotations" ) func TestRecoverEvaluatorRuntime(t *testing.T) { - var output []interface{} - logger := log.Logger(log.LoggerFunc(func(keyvals ...interface{}) error { - output = append(output, keyvals...) - return nil - })) + var output bytes.Buffer + logger := promslog.New(&promslog.Config{Writer: &output}) ev := &evaluator{logger: logger} expr, _ := parser.ParseExpr("sum(up)") @@ -38,7 +37,7 @@ func TestRecoverEvaluatorRuntime(t *testing.T) { defer func() { require.EqualError(t, err, "unexpected error: runtime error: index out of range [123] with length 0") - require.Contains(t, output, "sum(up)") + require.Contains(t, output.String(), "sum(up)") }() defer ev.recover(expr, nil, &err) @@ -48,7 +47,7 @@ func TestRecoverEvaluatorRuntime(t *testing.T) { } func TestRecoverEvaluatorError(t *testing.T) { - ev := &evaluator{logger: log.NewNopLogger()} + ev := &evaluator{logger: promslog.NewNopLogger()} var err error e := errors.New("custom error") @@ -62,7 +61,7 @@ func TestRecoverEvaluatorError(t *testing.T) { } func TestRecoverEvaluatorErrorWithWarnings(t *testing.T) { - ev := &evaluator{logger: log.NewNopLogger()} + ev := &evaluator{logger: promslog.NewNopLogger()} var err error var ws annotations.Annotations diff --git a/promql/engine_test.go b/promql/engine_test.go index 19bd781445..7c398029f5 100644 --- a/promql/engine_test.go +++ b/promql/engine_test.go @@ -2018,23 +2018,58 @@ func TestSubquerySelector(t *testing.T) { type FakeQueryLogger struct { closed bool logs []interface{} + attrs []any } func NewFakeQueryLogger() *FakeQueryLogger { return &FakeQueryLogger{ closed: false, logs: make([]interface{}, 0), + attrs: make([]any, 0), } } +// It implements the promql.QueryLogger interface. 
func (f *FakeQueryLogger) Close() error { f.closed = true return nil } -func (f *FakeQueryLogger) Log(l ...interface{}) error { - f.logs = append(f.logs, l...) - return nil +// It implements the promql.QueryLogger interface. +func (f *FakeQueryLogger) Info(msg string, args ...any) { + log := append([]any{msg}, args...) + log = append(log, f.attrs...) + f.attrs = f.attrs[:0] + f.logs = append(f.logs, log...) +} + +// It implements the promql.QueryLogger interface. +func (f *FakeQueryLogger) Error(msg string, args ...any) { + log := append([]any{msg}, args...) + log = append(log, f.attrs...) + f.attrs = f.attrs[:0] + f.logs = append(f.logs, log...) +} + +// It implements the promql.QueryLogger interface. +func (f *FakeQueryLogger) Warn(msg string, args ...any) { + log := append([]any{msg}, args...) + log = append(log, f.attrs...) + f.attrs = f.attrs[:0] + f.logs = append(f.logs, log...) +} + +// It implements the promql.QueryLogger interface. +func (f *FakeQueryLogger) Debug(msg string, args ...any) { + log := append([]any{msg}, args...) + log = append(log, f.attrs...) + f.attrs = f.attrs[:0] + f.logs = append(f.logs, log...) +} + +// It implements the promql.QueryLogger interface. +func (f *FakeQueryLogger) With(args ...any) { + f.attrs = append(f.attrs, args...) 
} func TestQueryLogger_basic(t *testing.T) { @@ -2062,9 +2097,8 @@ func TestQueryLogger_basic(t *testing.T) { f1 := NewFakeQueryLogger() engine.SetQueryLogger(f1) queryExec() - for i, field := range []interface{}{"params", map[string]interface{}{"query": "test statement"}} { - require.Equal(t, field, f1.logs[i]) - } + require.Contains(t, f1.logs, `params`) + require.Contains(t, f1.logs, map[string]interface{}{"query": "test statement"}) l := len(f1.logs) queryExec() @@ -2110,11 +2144,8 @@ func TestQueryLogger_fields(t *testing.T) { res := query.Exec(ctx) require.NoError(t, res.Err) - expected := []string{"foo", "bar"} - for i, field := range expected { - v := f1.logs[len(f1.logs)-len(expected)+i].(string) - require.Equal(t, field, v) - } + require.Contains(t, f1.logs, `foo`) + require.Contains(t, f1.logs, `bar`) } func TestQueryLogger_error(t *testing.T) { @@ -2140,9 +2171,10 @@ func TestQueryLogger_error(t *testing.T) { res := query.Exec(ctx) require.Error(t, res.Err, "query should have failed") - for i, field := range []interface{}{"params", map[string]interface{}{"query": "test statement"}, "error", testErr} { - require.Equal(t, f1.logs[i], field) - } + require.Contains(t, f1.logs, `params`) + require.Contains(t, f1.logs, map[string]interface{}{"query": "test statement"}) + require.Contains(t, f1.logs, `error`) + require.Contains(t, f1.logs, testErr) } func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) { diff --git a/promql/query_logger.go b/promql/query_logger.go index cb3d40519f..c0a70b66d7 100644 --- a/promql/query_logger.go +++ b/promql/query_logger.go @@ -19,6 +19,7 @@ import ( "errors" "fmt" "io" + "log/slog" "os" "path/filepath" "strings" @@ -26,14 +27,12 @@ import ( "unicode/utf8" "github.com/edsrzf/mmap-go" - "github.com/go-kit/log" - "github.com/go-kit/log/level" ) type ActiveQueryTracker struct { mmappedFile []byte getNextIndex chan int - logger log.Logger + logger *slog.Logger closer io.Closer maxConcurrent int } @@ -63,11 +62,11 @@ func 
parseBrokenJSON(brokenJSON []byte) (string, bool) { return queries, true } -func logUnfinishedQueries(filename string, filesize int, logger log.Logger) { +func logUnfinishedQueries(filename string, filesize int, logger *slog.Logger) { if _, err := os.Stat(filename); err == nil { fd, err := os.Open(filename) if err != nil { - level.Error(logger).Log("msg", "Failed to open query log file", "err", err) + logger.Error("Failed to open query log file", "err", err) return } defer fd.Close() @@ -75,7 +74,7 @@ func logUnfinishedQueries(filename string, filesize int, logger log.Logger) { brokenJSON := make([]byte, filesize) _, err = fd.Read(brokenJSON) if err != nil { - level.Error(logger).Log("msg", "Failed to read query log file", "err", err) + logger.Error("Failed to read query log file", "err", err) return } @@ -83,7 +82,7 @@ func logUnfinishedQueries(filename string, filesize int, logger log.Logger) { if !queriesExist { return } - level.Info(logger).Log("msg", "These queries didn't finish in prometheus' last run:", "queries", queries) + logger.Info("These queries didn't finish in prometheus' last run:", "queries", queries) } } @@ -104,38 +103,38 @@ func (f *mmappedFile) Close() error { return err } -func getMMappedFile(filename string, filesize int, logger log.Logger) ([]byte, io.Closer, error) { +func getMMappedFile(filename string, filesize int, logger *slog.Logger) ([]byte, io.Closer, error) { file, err := os.OpenFile(filename, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0o666) if err != nil { absPath, pathErr := filepath.Abs(filename) if pathErr != nil { absPath = filename } - level.Error(logger).Log("msg", "Error opening query log file", "file", absPath, "err", err) + logger.Error("Error opening query log file", "file", absPath, "err", err) return nil, nil, err } err = file.Truncate(int64(filesize)) if err != nil { file.Close() - level.Error(logger).Log("msg", "Error setting filesize.", "filesize", filesize, "err", err) + logger.Error("Error setting filesize.", "filesize", 
filesize, "err", err) return nil, nil, err } fileAsBytes, err := mmap.Map(file, mmap.RDWR, 0) if err != nil { file.Close() - level.Error(logger).Log("msg", "Failed to mmap", "file", filename, "Attempted size", filesize, "err", err) + logger.Error("Failed to mmap", "file", filename, "Attempted size", filesize, "err", err) return nil, nil, err } return fileAsBytes, &mmappedFile{f: file, m: fileAsBytes}, err } -func NewActiveQueryTracker(localStoragePath string, maxConcurrent int, logger log.Logger) *ActiveQueryTracker { +func NewActiveQueryTracker(localStoragePath string, maxConcurrent int, logger *slog.Logger) *ActiveQueryTracker { err := os.MkdirAll(localStoragePath, 0o777) if err != nil { - level.Error(logger).Log("msg", "Failed to create directory for logging active queries") + logger.Error("Failed to create directory for logging active queries") } filename, filesize := filepath.Join(localStoragePath, "queries.active"), 1+maxConcurrent*entrySize @@ -174,18 +173,18 @@ func trimStringByBytes(str string, size int) string { return string(bytesStr[:trimIndex]) } -func _newJSONEntry(query string, timestamp int64, logger log.Logger) []byte { +func _newJSONEntry(query string, timestamp int64, logger *slog.Logger) []byte { entry := Entry{query, timestamp} jsonEntry, err := json.Marshal(entry) if err != nil { - level.Error(logger).Log("msg", "Cannot create json of query", "query", query) + logger.Error("Cannot create json of query", "query", query) return []byte{} } return jsonEntry } -func newJSONEntry(query string, logger log.Logger) []byte { +func newJSONEntry(query string, logger *slog.Logger) []byte { timestamp := time.Now().Unix() minEntryJSON := _newJSONEntry("", timestamp, logger) diff --git a/rules/alerting.go b/rules/alerting.go index 2dc0917dce..7e74c176aa 100644 --- a/rules/alerting.go +++ b/rules/alerting.go @@ -16,13 +16,12 @@ package rules import ( "context" "fmt" + "log/slog" "net/url" "strings" "sync" "time" - "github.com/go-kit/log" - 
"github.com/go-kit/log/level" "github.com/prometheus/common/model" "go.uber.org/atomic" "gopkg.in/yaml.v2" @@ -141,7 +140,7 @@ type AlertingRule struct { // the fingerprint of the labelset they correspond to. active map[uint64]*Alert - logger log.Logger + logger *slog.Logger noDependentRules *atomic.Bool noDependencyRules *atomic.Bool @@ -151,7 +150,7 @@ type AlertingRule struct { func NewAlertingRule( name string, vec parser.Expr, hold, keepFiringFor time.Duration, labels, annotations, externalLabels labels.Labels, externalURL string, - restored bool, logger log.Logger, + restored bool, logger *slog.Logger, ) *AlertingRule { el := externalLabels.Map() @@ -381,7 +380,7 @@ func (r *AlertingRule) Eval(ctx context.Context, queryOffset time.Duration, ts t result, err := tmpl.Expand() if err != nil { result = fmt.Sprintf("", err) - level.Warn(r.logger).Log("msg", "Expanding alert template failed", "err", err, "data", tmplData) + r.logger.Warn("Expanding alert template failed", "err", err, "data", tmplData) } return result } diff --git a/rules/alerting_test.go b/rules/alerting_test.go index 67d683c851..f0aa339cc7 100644 --- a/rules/alerting_test.go +++ b/rules/alerting_test.go @@ -19,8 +19,8 @@ import ( "testing" "time" - "github.com/go-kit/log" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "github.com/prometheus/prometheus/model/histogram" @@ -276,7 +276,7 @@ func TestAlertingRuleExternalLabelsInTemplate(t *testing.T) { labels.EmptyLabels(), labels.EmptyLabels(), "", - true, log.NewNopLogger(), + true, promslog.NewNopLogger(), ) ruleWithExternalLabels := NewAlertingRule( "ExternalLabelExists", @@ -287,7 +287,7 @@ func TestAlertingRuleExternalLabelsInTemplate(t *testing.T) { labels.EmptyLabels(), labels.FromStrings("foo", "bar", "dings", "bums"), "", - true, log.NewNopLogger(), + true, promslog.NewNopLogger(), ) result := promql.Vector{ promql.Sample{ @@ -371,7 +371,7 @@ func 
TestAlertingRuleExternalURLInTemplate(t *testing.T) { labels.EmptyLabels(), labels.EmptyLabels(), "", - true, log.NewNopLogger(), + true, promslog.NewNopLogger(), ) ruleWithExternalURL := NewAlertingRule( "ExternalURLExists", @@ -382,7 +382,7 @@ func TestAlertingRuleExternalURLInTemplate(t *testing.T) { labels.EmptyLabels(), labels.EmptyLabels(), "http://localhost:1234", - true, log.NewNopLogger(), + true, promslog.NewNopLogger(), ) result := promql.Vector{ promql.Sample{ @@ -466,7 +466,7 @@ func TestAlertingRuleEmptyLabelFromTemplate(t *testing.T) { labels.EmptyLabels(), labels.EmptyLabels(), "", - true, log.NewNopLogger(), + true, promslog.NewNopLogger(), ) result := promql.Vector{ promql.Sample{ @@ -527,7 +527,7 @@ instance: {{ $v.Labels.instance }}, value: {{ printf "%.0f" $v.Value }}; `), labels.EmptyLabels(), "", - true, log.NewNopLogger(), + true, promslog.NewNopLogger(), ) evalTime := time.Unix(0, 0) @@ -607,7 +607,7 @@ func TestAlertingRuleDuplicate(t *testing.T) { labels.EmptyLabels(), labels.EmptyLabels(), "", - true, log.NewNopLogger(), + true, promslog.NewNopLogger(), ) _, err := rule.Eval(ctx, 0, now, EngineQueryFunc(engine, storage), nil, 0) require.Error(t, err) @@ -651,7 +651,7 @@ func TestAlertingRuleLimit(t *testing.T) { labels.EmptyLabels(), labels.EmptyLabels(), "", - true, log.NewNopLogger(), + true, promslog.NewNopLogger(), ) evalTime := time.Unix(0, 0) @@ -779,7 +779,7 @@ func TestSendAlertsDontAffectActiveAlerts(t *testing.T) { }, }, } - nm := notifier.NewManager(&opts, log.NewNopLogger()) + nm := notifier.NewManager(&opts, promslog.NewNopLogger()) f := SendAlerts(nm, "") notifyFunc := func(ctx context.Context, expr string, alerts ...*Alert) { @@ -986,7 +986,7 @@ func TestAlertingEvalWithOrigin(t *testing.T) { labels.EmptyLabels(), labels.EmptyLabels(), "", - true, log.NewNopLogger(), + true, promslog.NewNopLogger(), ) _, err = rule.Eval(ctx, 0, now, func(ctx context.Context, qs string, _ time.Time) (promql.Vector, error) { @@ -1008,7 
+1008,7 @@ func TestAlertingRule_SetNoDependentRules(t *testing.T) { labels.EmptyLabels(), labels.EmptyLabels(), "", - true, log.NewNopLogger(), + true, promslog.NewNopLogger(), ) require.False(t, rule.NoDependentRules()) @@ -1029,7 +1029,7 @@ func TestAlertingRule_SetNoDependencyRules(t *testing.T) { labels.EmptyLabels(), labels.EmptyLabels(), "", - true, log.NewNopLogger(), + true, promslog.NewNopLogger(), ) require.False(t, rule.NoDependencyRules()) diff --git a/rules/group.go b/rules/group.go index 6e98bf52f2..e9ef2be3ad 100644 --- a/rules/group.go +++ b/rules/group.go @@ -16,6 +16,7 @@ package rules import ( "context" "errors" + "log/slog" "math" "slices" "strings" @@ -26,10 +27,9 @@ import ( "github.com/prometheus/prometheus/promql/parser" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" @@ -65,7 +65,7 @@ type Group struct { terminated chan struct{} managerDone chan struct{} - logger log.Logger + logger *slog.Logger metrics *Metrics @@ -124,6 +124,10 @@ func NewGroup(o GroupOptions) *Group { concurrencyController = sequentialRuleEvalController{} } + if o.Opts.Logger == nil { + promslog.NewNopLogger() + } + return &Group{ name: o.Name, file: o.File, @@ -137,7 +141,7 @@ func NewGroup(o GroupOptions) *Group { done: make(chan struct{}), managerDone: o.done, terminated: make(chan struct{}), - logger: log.With(o.Opts.Logger, "file", o.File, "group", o.Name), + logger: o.Opts.Logger.With("file", o.File, "group", o.Name), metrics: metrics, evalIterationFunc: evalIterationFunc, concurrencyController: concurrencyController, @@ -200,7 +204,7 @@ func (g *Group) Interval() time.Duration { return g.interval } // Limit returns the group's limit. 
func (g *Group) Limit() int { return g.limit } -func (g *Group) Logger() log.Logger { return g.logger } +func (g *Group) Logger() *slog.Logger { return g.logger } func (g *Group) run(ctx context.Context) { defer close(g.terminated) @@ -272,7 +276,7 @@ func (g *Group) run(ctx context.Context) { g.RestoreForState(restoreStartTime) totalRestoreTimeSeconds := time.Since(restoreStartTime).Seconds() g.metrics.GroupLastRestoreDuration.WithLabelValues(GroupKey(g.file, g.name)).Set(totalRestoreTimeSeconds) - level.Debug(g.logger).Log("msg", "'for' state restoration completed", "duration_seconds", totalRestoreTimeSeconds) + g.logger.Debug("'for' state restoration completed", "duration_seconds", totalRestoreTimeSeconds) g.shouldRestore = false } @@ -495,7 +499,7 @@ func (g *Group) Eval(ctx context.Context, ts time.Time) { defer cleanup() } - logger := log.WithPrefix(g.logger, "name", rule.Name(), "index", i) + logger := g.logger.With("name", rule.Name(), "index", i) ctx, sp := otel.Tracer("").Start(ctx, "rule") sp.SetAttributes(attribute.String("name", rule.Name())) defer func(t time.Time) { @@ -508,7 +512,7 @@ func (g *Group) Eval(ctx context.Context, ts time.Time) { }(time.Now()) if sp.SpanContext().IsSampled() && sp.SpanContext().HasTraceID() { - logger = log.WithPrefix(logger, "trace_id", sp.SpanContext().TraceID()) + logger = logger.With("trace_id", sp.SpanContext().TraceID()) } g.metrics.EvalTotal.WithLabelValues(GroupKey(g.File(), g.Name())).Inc() @@ -524,7 +528,7 @@ func (g *Group) Eval(ctx context.Context, ts time.Time) { // happens on shutdown and thus we skip logging of any errors here. 
var eqc promql.ErrQueryCanceled if !errors.As(err, &eqc) { - level.Warn(logger).Log("msg", "Evaluating rule failed", "rule", rule, "err", err) + logger.Warn("Evaluating rule failed", "rule", rule, "err", err) } return } @@ -550,7 +554,7 @@ func (g *Group) Eval(ctx context.Context, ts time.Time) { sp.SetStatus(codes.Error, err.Error()) g.metrics.EvalFailures.WithLabelValues(GroupKey(g.File(), g.Name())).Inc() - level.Warn(logger).Log("msg", "Rule sample appending failed", "err", err) + logger.Warn("Rule sample appending failed", "err", err) return } g.seriesInPreviousEval[i] = seriesReturned @@ -574,15 +578,15 @@ func (g *Group) Eval(ctx context.Context, ts time.Time) { switch { case errors.Is(unwrappedErr, storage.ErrOutOfOrderSample): numOutOfOrder++ - level.Debug(logger).Log("msg", "Rule evaluation result discarded", "err", err, "sample", s) + logger.Debug("Rule evaluation result discarded", "err", err, "sample", s) case errors.Is(unwrappedErr, storage.ErrTooOldSample): numTooOld++ - level.Debug(logger).Log("msg", "Rule evaluation result discarded", "err", err, "sample", s) + logger.Debug("Rule evaluation result discarded", "err", err, "sample", s) case errors.Is(unwrappedErr, storage.ErrDuplicateSampleForTimestamp): numDuplicates++ - level.Debug(logger).Log("msg", "Rule evaluation result discarded", "err", err, "sample", s) + logger.Debug("Rule evaluation result discarded", "err", err, "sample", s) default: - level.Warn(logger).Log("msg", "Rule evaluation result discarded", "err", err, "sample", s) + logger.Warn("Rule evaluation result discarded", "err", err, "sample", s) } } else { buf := [1024]byte{} @@ -590,13 +594,13 @@ func (g *Group) Eval(ctx context.Context, ts time.Time) { } } if numOutOfOrder > 0 { - level.Warn(logger).Log("msg", "Error on ingesting out-of-order result from rule evaluation", "num_dropped", numOutOfOrder) + logger.Warn("Error on ingesting out-of-order result from rule evaluation", "num_dropped", numOutOfOrder) } if numTooOld > 0 { - 
level.Warn(logger).Log("msg", "Error on ingesting too old result from rule evaluation", "num_dropped", numTooOld) + logger.Warn("Error on ingesting too old result from rule evaluation", "num_dropped", numTooOld) } if numDuplicates > 0 { - level.Warn(logger).Log("msg", "Error on ingesting results from rule evaluation with different value but same timestamp", "num_dropped", numDuplicates) + logger.Warn("Error on ingesting results from rule evaluation with different value but same timestamp", "num_dropped", numDuplicates) } for metric, lset := range g.seriesInPreviousEval[i] { @@ -615,7 +619,7 @@ func (g *Group) Eval(ctx context.Context, ts time.Time) { // Do not count these in logging, as this is expected if series // is exposed from a different rule. default: - level.Warn(logger).Log("msg", "Adding stale sample failed", "sample", lset.String(), "err", err) + logger.Warn("Adding stale sample failed", "sample", lset.String(), "err", err) } } } @@ -672,11 +676,11 @@ func (g *Group) cleanupStaleSeries(ctx context.Context, ts time.Time) { // Do not count these in logging, as this is expected if series // is exposed from a different rule. 
default: - level.Warn(g.logger).Log("msg", "Adding stale sample for previous configuration failed", "sample", s, "err", err) + g.logger.Warn("Adding stale sample for previous configuration failed", "sample", s, "err", err) } } if err := app.Commit(); err != nil { - level.Warn(g.logger).Log("msg", "Stale sample appending for previous configuration failed", "err", err) + g.logger.Warn("Stale sample appending for previous configuration failed", "err", err) } else { g.staleSeries = nil } @@ -691,12 +695,12 @@ func (g *Group) RestoreForState(ts time.Time) { mintMS := int64(model.TimeFromUnixNano(mint.UnixNano())) q, err := g.opts.Queryable.Querier(mintMS, maxtMS) if err != nil { - level.Error(g.logger).Log("msg", "Failed to get Querier", "err", err) + g.logger.Error("Failed to get Querier", "err", err) return } defer func() { if err := q.Close(); err != nil { - level.Error(g.logger).Log("msg", "Failed to close Querier", "err", err) + g.logger.Error("Failed to close Querier", "err", err) } }() @@ -717,8 +721,8 @@ func (g *Group) RestoreForState(ts time.Time) { sset, err := alertRule.QueryForStateSeries(g.opts.Context, q) if err != nil { - level.Error(g.logger).Log( - "msg", "Failed to restore 'for' state", + g.logger.Error( + "Failed to restore 'for' state", labels.AlertName, alertRule.Name(), "stage", "Select", "err", err, @@ -737,7 +741,7 @@ func (g *Group) RestoreForState(ts time.Time) { // No results for this alert rule. 
if len(seriesByLabels) == 0 { - level.Debug(g.logger).Log("msg", "No series found to restore the 'for' state of the alert rule", labels.AlertName, alertRule.Name()) + g.logger.Debug("No series found to restore the 'for' state of the alert rule", labels.AlertName, alertRule.Name()) alertRule.SetRestored(true) continue } @@ -757,7 +761,7 @@ func (g *Group) RestoreForState(ts time.Time) { t, v = it.At() } if it.Err() != nil { - level.Error(g.logger).Log("msg", "Failed to restore 'for' state", + g.logger.Error("Failed to restore 'for' state", labels.AlertName, alertRule.Name(), "stage", "Iterator", "err", it.Err()) return } @@ -799,7 +803,7 @@ func (g *Group) RestoreForState(ts time.Time) { } a.ActiveAt = restoredActiveAt - level.Debug(g.logger).Log("msg", "'for' state restored", + g.logger.Debug("'for' state restored", labels.AlertName, alertRule.Name(), "restored_time", a.ActiveAt.Format(time.RFC850), "labels", a.Labels.String()) }) diff --git a/rules/manager.go b/rules/manager.go index 9e5b33fbc9..2e89184bd6 100644 --- a/rules/manager.go +++ b/rules/manager.go @@ -17,15 +17,15 @@ import ( "context" "errors" "fmt" + "log/slog" "net/url" "slices" "strings" "sync" "time" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/common/promslog" "golang.org/x/sync/semaphore" "github.com/prometheus/prometheus/model/labels" @@ -96,7 +96,7 @@ type Manager struct { done chan struct{} restored bool - logger log.Logger + logger *slog.Logger } // NotifyFunc sends notifications about a set of alerts generated by the given expression. 
@@ -110,7 +110,7 @@ type ManagerOptions struct { Context context.Context Appendable storage.Appendable Queryable storage.Queryable - Logger log.Logger + Logger *slog.Logger Registerer prometheus.Registerer OutageTolerance time.Duration ForGracePeriod time.Duration @@ -148,6 +148,10 @@ func NewManager(o *ManagerOptions) *Manager { o.RuleDependencyController = ruleDependencyController{} } + if o.Logger == nil { + o.Logger = promslog.NewNopLogger() + } + m := &Manager{ groups: map[string]*Group{}, opts: o, @@ -161,7 +165,7 @@ func NewManager(o *ManagerOptions) *Manager { // Run starts processing of the rule manager. It is blocking. func (m *Manager) Run() { - level.Info(m.logger).Log("msg", "Starting rule manager...") + m.logger.Info("Starting rule manager...") m.start() <-m.done } @@ -175,7 +179,7 @@ func (m *Manager) Stop() { m.mtx.Lock() defer m.mtx.Unlock() - level.Info(m.logger).Log("msg", "Stopping rule manager...") + m.logger.Info("Stopping rule manager...") for _, eg := range m.groups { eg.stop() @@ -185,7 +189,7 @@ func (m *Manager) Stop() { // staleness markers. close(m.done) - level.Info(m.logger).Log("msg", "Rule manager stopped") + m.logger.Info("Rule manager stopped") } // Update the rule manager's state as the config requires. 
If @@ -206,7 +210,7 @@ func (m *Manager) Update(interval time.Duration, files []string, externalLabels if errs != nil { for _, e := range errs { - level.Error(m.logger).Log("msg", "loading groups failed", "err", e) + m.logger.Error("loading groups failed", "err", e) } return errors.New("error loading rules, previous rule set restored") } @@ -323,7 +327,7 @@ func (m *Manager) LoadGroups( externalLabels, externalURL, m.restored, - log.With(m.logger, "alert", r.Alert), + m.logger.With("alert", r.Alert), )) continue } diff --git a/rules/manager_test.go b/rules/manager_test.go index b9f6db3273..ffb15952b9 100644 --- a/rules/manager_test.go +++ b/rules/manager_test.go @@ -26,10 +26,10 @@ import ( "testing" "time" - "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/testutil" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "go.uber.org/atomic" "gopkg.in/yaml.v2" @@ -374,7 +374,7 @@ func TestForStateRestore(t *testing.T) { Appendable: storage, Queryable: storage, Context: context.Background(), - Logger: log.NewNopLogger(), + Logger: promslog.NewNopLogger(), NotifyFunc: func(ctx context.Context, expr string, alerts ...*Alert) {}, OutageTolerance: 30 * time.Minute, ForGracePeriod: 10 * time.Minute, @@ -547,7 +547,7 @@ func TestStaleness(t *testing.T) { Appendable: st, Queryable: st, Context: context.Background(), - Logger: log.NewNopLogger(), + Logger: promslog.NewNopLogger(), } expr, err := parser.ParseExpr("a + 1") @@ -641,7 +641,7 @@ groups: require.NoError(t, err) m := NewManager(&ManagerOptions{ - Logger: log.NewNopLogger(), + Logger: promslog.NewNopLogger(), DefaultRuleQueryOffset: func() time.Duration { return time.Minute }, @@ -781,7 +781,7 @@ func TestUpdate(t *testing.T) { Queryable: st, QueryFunc: EngineQueryFunc(engine, st), Context: context.Background(), - Logger: log.NewNopLogger(), + Logger: promslog.NewNopLogger(), }) 
ruleManager.start() defer ruleManager.Stop() @@ -923,14 +923,14 @@ func TestNotify(t *testing.T) { Appendable: storage, Queryable: storage, Context: context.Background(), - Logger: log.NewNopLogger(), + Logger: promslog.NewNopLogger(), NotifyFunc: notifyFunc, ResendDelay: 2 * time.Second, } expr, err := parser.ParseExpr("a > 1") require.NoError(t, err) - rule := NewAlertingRule("aTooHigh", expr, 0, 0, labels.Labels{}, labels.Labels{}, labels.EmptyLabels(), "", true, log.NewNopLogger()) + rule := NewAlertingRule("aTooHigh", expr, 0, 0, labels.Labels{}, labels.Labels{}, labels.EmptyLabels(), "", true, promslog.NewNopLogger()) group := NewGroup(GroupOptions{ Name: "alert", Interval: time.Second, @@ -994,7 +994,7 @@ func TestMetricsUpdate(t *testing.T) { Queryable: storage, QueryFunc: EngineQueryFunc(engine, storage), Context: context.Background(), - Logger: log.NewNopLogger(), + Logger: promslog.NewNopLogger(), Registerer: registry, }) ruleManager.start() @@ -1068,7 +1068,7 @@ func TestGroupStalenessOnRemoval(t *testing.T) { Queryable: storage, QueryFunc: EngineQueryFunc(engine, storage), Context: context.Background(), - Logger: log.NewNopLogger(), + Logger: promslog.NewNopLogger(), }) var stopped bool ruleManager.start() @@ -1145,7 +1145,7 @@ func TestMetricsStalenessOnManagerShutdown(t *testing.T) { Queryable: storage, QueryFunc: EngineQueryFunc(engine, storage), Context: context.Background(), - Logger: log.NewNopLogger(), + Logger: promslog.NewNopLogger(), }) var stopped bool ruleManager.start() @@ -1247,7 +1247,7 @@ func TestRuleHealthUpdates(t *testing.T) { Appendable: st, Queryable: st, Context: context.Background(), - Logger: log.NewNopLogger(), + Logger: promslog.NewNopLogger(), } expr, err := parser.ParseExpr("a + 1") @@ -1345,7 +1345,7 @@ func TestRuleGroupEvalIterationFunc(t *testing.T) { Appendable: storage, Queryable: storage, Context: context.Background(), - Logger: log.NewNopLogger(), + Logger: promslog.NewNopLogger(), NotifyFunc: func(ctx 
context.Context, expr string, alerts ...*Alert) {}, OutageTolerance: 30 * time.Minute, ForGracePeriod: 10 * time.Minute, @@ -1431,7 +1431,7 @@ func TestNativeHistogramsInRecordingRules(t *testing.T) { Appendable: storage, Queryable: storage, Context: context.Background(), - Logger: log.NewNopLogger(), + Logger: promslog.NewNopLogger(), } expr, err := parser.ParseExpr("sum(histogram_metric)") @@ -1479,7 +1479,7 @@ func TestManager_LoadGroups_ShouldCheckWhetherEachRuleHasDependentsAndDependenci ruleManager := NewManager(&ManagerOptions{ Context: context.Background(), - Logger: log.NewNopLogger(), + Logger: promslog.NewNopLogger(), Appendable: storage, QueryFunc: func(ctx context.Context, q string, ts time.Time) (promql.Vector, error) { return nil, nil }, }) @@ -1535,7 +1535,7 @@ func TestDependencyMap(t *testing.T) { ctx := context.Background() opts := &ManagerOptions{ Context: ctx, - Logger: log.NewNopLogger(), + Logger: promslog.NewNopLogger(), } expr, err := parser.ParseExpr("sum by (user) (rate(requests[1m]))") @@ -1544,7 +1544,7 @@ func TestDependencyMap(t *testing.T) { expr, err = parser.ParseExpr("user:requests:rate1m <= 0") require.NoError(t, err) - rule2 := NewAlertingRule("ZeroRequests", expr, 0, 0, labels.Labels{}, labels.Labels{}, labels.EmptyLabels(), "", true, log.NewNopLogger()) + rule2 := NewAlertingRule("ZeroRequests", expr, 0, 0, labels.Labels{}, labels.Labels{}, labels.EmptyLabels(), "", true, promslog.NewNopLogger()) expr, err = parser.ParseExpr("sum by (user) (rate(requests[5m]))") require.NoError(t, err) @@ -1584,7 +1584,7 @@ func TestNoDependency(t *testing.T) { ctx := context.Background() opts := &ManagerOptions{ Context: ctx, - Logger: log.NewNopLogger(), + Logger: promslog.NewNopLogger(), } expr, err := parser.ParseExpr("sum by (user) (rate(requests[1m]))") @@ -1607,7 +1607,7 @@ func TestDependenciesEdgeCases(t *testing.T) { ctx := context.Background() opts := &ManagerOptions{ Context: ctx, - Logger: log.NewNopLogger(), + Logger: 
promslog.NewNopLogger(), } t.Run("empty group", func(t *testing.T) { @@ -1765,7 +1765,7 @@ func TestNoMetricSelector(t *testing.T) { ctx := context.Background() opts := &ManagerOptions{ Context: ctx, - Logger: log.NewNopLogger(), + Logger: promslog.NewNopLogger(), } expr, err := parser.ParseExpr("sum by (user) (rate(requests[1m]))") @@ -1794,7 +1794,7 @@ func TestDependentRulesWithNonMetricExpression(t *testing.T) { ctx := context.Background() opts := &ManagerOptions{ Context: ctx, - Logger: log.NewNopLogger(), + Logger: promslog.NewNopLogger(), } expr, err := parser.ParseExpr("sum by (user) (rate(requests[1m]))") @@ -1803,7 +1803,7 @@ func TestDependentRulesWithNonMetricExpression(t *testing.T) { expr, err = parser.ParseExpr("user:requests:rate1m <= 0") require.NoError(t, err) - rule2 := NewAlertingRule("ZeroRequests", expr, 0, 0, labels.Labels{}, labels.Labels{}, labels.EmptyLabels(), "", true, log.NewNopLogger()) + rule2 := NewAlertingRule("ZeroRequests", expr, 0, 0, labels.Labels{}, labels.Labels{}, labels.EmptyLabels(), "", true, promslog.NewNopLogger()) expr, err = parser.ParseExpr("3") require.NoError(t, err) @@ -1826,7 +1826,7 @@ func TestRulesDependentOnMetaMetrics(t *testing.T) { ctx := context.Background() opts := &ManagerOptions{ Context: ctx, - Logger: log.NewNopLogger(), + Logger: promslog.NewNopLogger(), } // This rule is not dependent on any other rules in its group but it does depend on `ALERTS`, which is produced by @@ -1855,7 +1855,7 @@ func TestDependencyMapUpdatesOnGroupUpdate(t *testing.T) { files := []string{"fixtures/rules.yaml"} ruleManager := NewManager(&ManagerOptions{ Context: context.Background(), - Logger: log.NewNopLogger(), + Logger: promslog.NewNopLogger(), }) ruleManager.start() @@ -2107,7 +2107,7 @@ func TestUpdateWhenStopped(t *testing.T) { files := []string{"fixtures/rules.yaml"} ruleManager := NewManager(&ManagerOptions{ Context: context.Background(), - Logger: log.NewNopLogger(), + Logger: promslog.NewNopLogger(), }) 
ruleManager.start() err := ruleManager.Update(10*time.Second, files, labels.EmptyLabels(), "", nil) @@ -2129,7 +2129,7 @@ func optsFactory(storage storage.Storage, maxInflight, inflightQueries *atomic.I return &ManagerOptions{ Context: context.Background(), - Logger: log.NewNopLogger(), + Logger: promslog.NewNopLogger(), ConcurrentEvalsEnabled: concurrent, MaxConcurrentEvals: maxConcurrent, Appendable: storage, diff --git a/rules/origin_test.go b/rules/origin_test.go index 75c83f9a4e..0bf428f3c1 100644 --- a/rules/origin_test.go +++ b/rules/origin_test.go @@ -19,9 +19,10 @@ import ( "testing" "time" - "github.com/go-kit/log" "github.com/stretchr/testify/require" + "github.com/prometheus/common/promslog" + "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/promql/parser" @@ -96,7 +97,7 @@ func TestNewRuleDetail(t *testing.T) { labels.EmptyLabels(), labels.EmptyLabels(), "", - true, log.NewNopLogger(), + true, promslog.NewNopLogger(), ) detail := NewRuleDetail(rule) diff --git a/scrape/manager.go b/scrape/manager.go index cbb881028d..8d22bd9d18 100644 --- a/scrape/manager.go +++ b/scrape/manager.go @@ -17,32 +17,32 @@ import ( "errors" "fmt" "hash/fnv" - "io" + "log/slog" "reflect" "sync" "time" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" config_util "github.com/prometheus/common/config" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/discovery/targetgroup" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/storage" + "github.com/prometheus/prometheus/util/logging" "github.com/prometheus/prometheus/util/osutil" "github.com/prometheus/prometheus/util/pool" ) // NewManager is the Manager constructor. 
-func NewManager(o *Options, logger log.Logger, newScrapeFailureLogger func(string) (log.Logger, error), app storage.Appendable, registerer prometheus.Registerer) (*Manager, error) { +func NewManager(o *Options, logger *slog.Logger, newScrapeFailureLogger func(string) (*logging.JSONFileLogger, error), app storage.Appendable, registerer prometheus.Registerer) (*Manager, error) { if o == nil { o = &Options{} } if logger == nil { - logger = log.NewNopLogger() + logger = promslog.NewNopLogger() } sm, err := newScrapeMetrics(registerer) @@ -100,7 +100,7 @@ const DefaultNameEscapingScheme = model.ValueEncodingEscaping // when receiving new target groups from the discovery manager. type Manager struct { opts *Options - logger log.Logger + logger *slog.Logger append storage.Appendable graceShut chan struct{} @@ -108,8 +108,8 @@ type Manager struct { mtxScrape sync.Mutex // Guards the fields below. scrapeConfigs map[string]*config.ScrapeConfig scrapePools map[string]*scrapePool - newScrapeFailureLogger func(string) (log.Logger, error) - scrapeFailureLoggers map[string]log.Logger + newScrapeFailureLogger func(string) (*logging.JSONFileLogger, error) + scrapeFailureLoggers map[string]*logging.JSONFileLogger targetSets map[string][]*targetgroup.Group buffers *pool.Pool @@ -175,21 +175,21 @@ func (m *Manager) reload() { if _, ok := m.scrapePools[setName]; !ok { scrapeConfig, ok := m.scrapeConfigs[setName] if !ok { - level.Error(m.logger).Log("msg", "error reloading target set", "err", "invalid config id:"+setName) + m.logger.Error("error reloading target set", "err", "invalid config id:"+setName) continue } m.metrics.targetScrapePools.Inc() - sp, err := newScrapePool(scrapeConfig, m.append, m.offsetSeed, log.With(m.logger, "scrape_pool", setName), m.buffers, m.opts, m.metrics) + sp, err := newScrapePool(scrapeConfig, m.append, m.offsetSeed, m.logger.With("scrape_pool", setName), m.buffers, m.opts, m.metrics) if err != nil { m.metrics.targetScrapePoolsFailed.Inc() - 
level.Error(m.logger).Log("msg", "error creating new scrape pool", "err", err, "scrape_pool", setName) + m.logger.Error("error creating new scrape pool", "err", err, "scrape_pool", setName) continue } m.scrapePools[setName] = sp if l, ok := m.scrapeFailureLoggers[scrapeConfig.ScrapeFailureLogFile]; ok { sp.SetScrapeFailureLogger(l) } else { - level.Error(sp.logger).Log("msg", "No logger found. This is a bug in Prometheus that should be reported upstream.", "scrape_pool", setName) + sp.logger.Error("No logger found. This is a bug in Prometheus that should be reported upstream.", "scrape_pool", setName) } } @@ -246,7 +246,7 @@ func (m *Manager) ApplyConfig(cfg *config.Config) error { } c := make(map[string]*config.ScrapeConfig) - scrapeFailureLoggers := map[string]log.Logger{ + scrapeFailureLoggers := map[string]*logging.JSONFileLogger{ "": nil, // Emptying the file name sets the scrape logger to nil. } for _, scfg := range scfgs { @@ -254,23 +254,23 @@ func (m *Manager) ApplyConfig(cfg *config.Config) error { if _, ok := scrapeFailureLoggers[scfg.ScrapeFailureLogFile]; !ok { // We promise to reopen the file on each reload. 
var ( - l log.Logger - err error + logger *logging.JSONFileLogger + err error ) if m.newScrapeFailureLogger != nil { - if l, err = m.newScrapeFailureLogger(scfg.ScrapeFailureLogFile); err != nil { + if logger, err = m.newScrapeFailureLogger(scfg.ScrapeFailureLogFile); err != nil { return err } } - scrapeFailureLoggers[scfg.ScrapeFailureLogFile] = l + scrapeFailureLoggers[scfg.ScrapeFailureLogFile] = logger } } m.scrapeConfigs = c oldScrapeFailureLoggers := m.scrapeFailureLoggers for _, s := range oldScrapeFailureLoggers { - if closer, ok := s.(io.Closer); ok { - defer closer.Close() + if s != nil { + defer s.Close() } } @@ -290,7 +290,7 @@ func (m *Manager) ApplyConfig(cfg *config.Config) error { case !reflect.DeepEqual(sp.config, cfg): err := sp.reload(cfg) if err != nil { - level.Error(m.logger).Log("msg", "error reloading scrape pool", "err", err, "scrape_pool", name) + m.logger.Error("error reloading scrape pool", "err", err, "scrape_pool", name) failed = true } fallthrough @@ -298,7 +298,7 @@ func (m *Manager) ApplyConfig(cfg *config.Config) error { if l, ok := m.scrapeFailureLoggers[cfg.ScrapeFailureLogFile]; ok { sp.SetScrapeFailureLogger(l) } else { - level.Error(sp.logger).Log("msg", "No logger found. This is a bug in Prometheus that should be reported upstream.", "scrape_pool", name) + sp.logger.Error("No logger found. 
This is a bug in Prometheus that should be reported upstream.", "scrape_pool", name) } } } diff --git a/scrape/manager_test.go b/scrape/manager_test.go index 8d2c3c9681..c3544f6344 100644 --- a/scrape/manager_test.go +++ b/scrape/manager_test.go @@ -27,12 +27,12 @@ import ( "testing" "time" - "github.com/go-kit/log" "github.com/gogo/protobuf/proto" "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" "github.com/prometheus/common/expfmt" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "google.golang.org/protobuf/types/known/timestamppb" "gopkg.in/yaml.v2" @@ -730,7 +730,7 @@ func setupScrapeManager(t *testing.T, honorTimestamps, enableCTZeroIngestion boo EnableCreatedTimestampZeroIngestion: enableCTZeroIngestion, skipOffsetting: true, }, - log.NewLogfmtLogger(os.Stderr), + promslog.New(&promslog.Config{}), nil, &collectResultAppendable{app}, prometheus.NewRegistry(), @@ -984,7 +984,7 @@ func TestManagerCTZeroIngestionHistogram(t *testing.T) { EnableNativeHistogramsIngestion: true, skipOffsetting: true, }, - log.NewLogfmtLogger(os.Stderr), + promslog.New(&promslog.Config{}), nil, &collectResultAppendable{app}, prometheus.NewRegistry(), @@ -1125,7 +1125,7 @@ func runManagers(t *testing.T, ctx context.Context) (*discovery.Manager, *Manage require.NoError(t, err) discoveryManager := discovery.NewManager( ctx, - log.NewNopLogger(), + promslog.NewNopLogger(), reg, sdMetrics, discovery.Updatert(100*time.Millisecond), diff --git a/scrape/scrape.go b/scrape/scrape.go index c66f203ddc..77572bb55e 100644 --- a/scrape/scrape.go +++ b/scrape/scrape.go @@ -20,6 +20,7 @@ import ( "errors" "fmt" "io" + "log/slog" "math" "net/http" "reflect" @@ -29,11 +30,10 @@ import ( "sync" "time" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/klauspost/compress/gzip" config_util "github.com/prometheus/common/config" "github.com/prometheus/common/model" + 
"github.com/prometheus/common/promslog" "github.com/prometheus/common/version" "github.com/prometheus/prometheus/config" @@ -47,6 +47,7 @@ import ( "github.com/prometheus/prometheus/model/timestamp" "github.com/prometheus/prometheus/model/value" "github.com/prometheus/prometheus/storage" + "github.com/prometheus/prometheus/util/logging" "github.com/prometheus/prometheus/util/pool" ) @@ -63,7 +64,7 @@ var errNameLabelMandatory = fmt.Errorf("missing metric name (%s label)", labels. // scrapePool manages scrapes for sets of targets. type scrapePool struct { appendable storage.Appendable - logger log.Logger + logger *slog.Logger cancel context.CancelFunc httpOpts []config_util.HTTPClientOption @@ -89,7 +90,7 @@ type scrapePool struct { metrics *scrapeMetrics - scrapeFailureLogger log.Logger + scrapeFailureLogger *logging.JSONFileLogger scrapeFailureLoggerMtx sync.RWMutex } @@ -124,9 +125,9 @@ const maxAheadTime = 10 * time.Minute // returning an empty label set is interpreted as "drop". type labelsMutator func(labels.Labels) labels.Labels -func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, offsetSeed uint64, logger log.Logger, buffers *pool.Pool, options *Options, metrics *scrapeMetrics) (*scrapePool, error) { +func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, offsetSeed uint64, logger *slog.Logger, buffers *pool.Pool, options *Options, metrics *scrapeMetrics) (*scrapePool, error) { if logger == nil { - logger = log.NewNopLogger() + logger = promslog.NewNopLogger() } client, err := config_util.NewClientFromConfig(cfg.HTTPClientConfig, cfg.JobName, options.HTTPClientOptions...) 
@@ -159,7 +160,7 @@ func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, offsetSeed return newScrapeLoop( ctx, opts.scraper, - log.With(logger, "target", opts.target), + logger.With("target", opts.target), buffers, func(l labels.Labels) labels.Labels { return mutateSampleLabels(l, opts.target, opts.honorLabels, opts.mrc) @@ -218,11 +219,11 @@ func (sp *scrapePool) DroppedTargetsCount() int { return sp.droppedTargetsCount } -func (sp *scrapePool) SetScrapeFailureLogger(l log.Logger) { +func (sp *scrapePool) SetScrapeFailureLogger(l *logging.JSONFileLogger) { sp.scrapeFailureLoggerMtx.Lock() defer sp.scrapeFailureLoggerMtx.Unlock() if l != nil { - l = log.With(l, "job_name", sp.config.JobName) + l.With("job_name", sp.config.JobName) } sp.scrapeFailureLogger = l @@ -233,7 +234,7 @@ func (sp *scrapePool) SetScrapeFailureLogger(l log.Logger) { } } -func (sp *scrapePool) getScrapeFailureLogger() log.Logger { +func (sp *scrapePool) getScrapeFailureLogger() *logging.JSONFileLogger { sp.scrapeFailureLoggerMtx.RLock() defer sp.scrapeFailureLoggerMtx.RUnlock() return sp.scrapeFailureLogger @@ -428,7 +429,7 @@ func (sp *scrapePool) Sync(tgs []*targetgroup.Group) { for _, tg := range tgs { targets, failures := TargetsFromGroup(tg, sp.config, targets, lb) for _, err := range failures { - level.Error(sp.logger).Log("msg", "Creating target failed", "err", err) + sp.logger.Error("Creating target failed", "err", err) } sp.metrics.targetSyncFailed.WithLabelValues(sp.config.JobName).Add(float64(len(failures))) for _, t := range targets { @@ -849,7 +850,7 @@ func (s *targetScraper) readResponse(ctx context.Context, resp *http.Response, w type loop interface { run(errc chan<- error) setForcedError(err error) - setScrapeFailureLogger(log.Logger) + setScrapeFailureLogger(*logging.JSONFileLogger) stop() getCache() *scrapeCache disableEndOfRunStalenessMarkers() @@ -864,8 +865,8 @@ type cacheEntry struct { type scrapeLoop struct { scraper scraper - l log.Logger - 
scrapeFailureLogger log.Logger + l *slog.Logger + scrapeFailureLogger *logging.JSONFileLogger scrapeFailureLoggerMtx sync.RWMutex cache *scrapeCache lastScrapeSize int @@ -1165,7 +1166,7 @@ func (c *scrapeCache) LengthMetadata() int { func newScrapeLoop(ctx context.Context, sc scraper, - l log.Logger, + l *slog.Logger, buffers *pool.Pool, sampleMutator labelsMutator, reportSampleMutator labelsMutator, @@ -1194,7 +1195,7 @@ func newScrapeLoop(ctx context.Context, validationScheme model.ValidationScheme, ) *scrapeLoop { if l == nil { - l = log.NewNopLogger() + l = promslog.NewNopLogger() } if buffers == nil { buffers = pool.New(1e3, 1e6, 3, func(sz int) interface{} { return make([]byte, 0, sz) }) @@ -1250,11 +1251,11 @@ func newScrapeLoop(ctx context.Context, return sl } -func (sl *scrapeLoop) setScrapeFailureLogger(l log.Logger) { +func (sl *scrapeLoop) setScrapeFailureLogger(l *logging.JSONFileLogger) { sl.scrapeFailureLoggerMtx.Lock() defer sl.scrapeFailureLoggerMtx.Unlock() if ts, ok := sl.scraper.(fmt.Stringer); ok && l != nil { - l = log.With(l, "target", ts.String()) + l.With("target", ts.String()) } sl.scrapeFailureLogger = l } @@ -1352,13 +1353,13 @@ func (sl *scrapeLoop) scrapeAndReport(last, appendTime time.Time, errc chan<- er } err = app.Commit() if err != nil { - level.Error(sl.l).Log("msg", "Scrape commit failed", "err", err) + sl.l.Error("Scrape commit failed", "err", err) } }() defer func() { if err = sl.report(app, appendTime, time.Since(start), total, added, seriesAdded, bytesRead, scrapeErr); err != nil { - level.Warn(sl.l).Log("msg", "Appending scrape report failed", "err", err) + sl.l.Warn("Appending scrape report failed", "err", err) } }() @@ -1368,7 +1369,7 @@ func (sl *scrapeLoop) scrapeAndReport(last, appendTime time.Time, errc chan<- er if _, _, _, err := sl.append(app, []byte{}, "", appendTime); err != nil { app.Rollback() app = sl.appender(sl.appenderCtx) - level.Warn(sl.l).Log("msg", "Append failed", "err", err) + sl.l.Warn("Append 
failed", "err", err) } if errc != nil { errc <- forcedErr @@ -1401,10 +1402,10 @@ func (sl *scrapeLoop) scrapeAndReport(last, appendTime time.Time, errc chan<- er } bytesRead = len(b) } else { - level.Debug(sl.l).Log("msg", "Scrape failed", "err", scrapeErr) + sl.l.Debug("Scrape failed", "err", scrapeErr) sl.scrapeFailureLoggerMtx.RLock() if sl.scrapeFailureLogger != nil { - sl.scrapeFailureLogger.Log("err", scrapeErr) + sl.scrapeFailureLogger.Error("err", scrapeErr) } sl.scrapeFailureLoggerMtx.RUnlock() if errc != nil { @@ -1421,13 +1422,13 @@ func (sl *scrapeLoop) scrapeAndReport(last, appendTime time.Time, errc chan<- er if appErr != nil { app.Rollback() app = sl.appender(sl.appenderCtx) - level.Debug(sl.l).Log("msg", "Append failed", "err", appErr) + sl.l.Debug("Append failed", "err", appErr) // The append failed, probably due to a parse error or sample limit. // Call sl.append again with an empty scrape to trigger stale markers. if _, _, _, err := sl.append(app, []byte{}, "", appendTime); err != nil { app.Rollback() app = sl.appender(sl.appenderCtx) - level.Warn(sl.l).Log("msg", "Append failed", "err", err) + sl.l.Warn("Append failed", "err", err) } } @@ -1500,16 +1501,16 @@ func (sl *scrapeLoop) endOfRunStaleness(last time.Time, ticker *time.Ticker, int } err = app.Commit() if err != nil { - level.Warn(sl.l).Log("msg", "Stale commit failed", "err", err) + sl.l.Warn("Stale commit failed", "err", err) } }() if _, _, _, err = sl.append(app, []byte{}, "", staleTime); err != nil { app.Rollback() app = sl.appender(sl.appenderCtx) - level.Warn(sl.l).Log("msg", "Stale append failed", "err", err) + sl.l.Warn("Stale append failed", "err", err) } if err = sl.reportStale(app, staleTime); err != nil { - level.Warn(sl.l).Log("msg", "Stale report failed", "err", err) + sl.l.Warn("Stale report failed", "err", err) } } @@ -1538,8 +1539,8 @@ type appendErrors struct { func (sl *scrapeLoop) append(app storage.Appender, b []byte, contentType string, ts time.Time) (total, added, 
seriesAdded int, err error) { p, err := textparse.New(b, contentType, sl.scrapeClassicHistograms, sl.enableCTZeroIngestion, sl.symbolTable) if err != nil { - level.Debug(sl.l).Log( - "msg", "Invalid content type on scrape, using prometheus parser as fallback.", + sl.l.Debug( + "Invalid content type on scrape, using prometheus parser as fallback.", "content_type", contentType, "err", err, ) @@ -1710,7 +1711,7 @@ loop: if err != nil && !errors.Is(err, storage.ErrOutOfOrderCT) { // OOO is a common case, ignoring completely for now. // CT is an experimental feature. For now, we don't need to fail the // scrape on errors updating the created timestamp, log debug. - level.Debug(sl.l).Log("msg", "Error when appending CT in scrape loop", "series", string(met), "ct", *ctMs, "t", t, "err", err) + sl.l.Debug("Error when appending CT in scrape loop", "series", string(met), "ct", *ctMs, "t", t, "err", err) } } } @@ -1735,7 +1736,7 @@ loop: sampleAdded, err = sl.checkAddError(met, err, &sampleLimitErr, &bucketLimitErr, &appErrs) if err != nil { if !errors.Is(err, storage.ErrNotFound) { - level.Debug(sl.l).Log("msg", "Unexpected error", "series", string(met), "err", err) + sl.l.Debug("Unexpected error", "series", string(met), "err", err) } break loop } @@ -1787,21 +1788,21 @@ loop: outOfOrderExemplars++ default: // Since exemplar storage is still experimental, we don't fail the scrape on ingestion errors. - level.Debug(sl.l).Log("msg", "Error while adding exemplar in AddExemplar", "exemplar", fmt.Sprintf("%+v", e), "err", exemplarErr) + sl.l.Debug("Error while adding exemplar in AddExemplar", "exemplar", fmt.Sprintf("%+v", e), "err", exemplarErr) } } if outOfOrderExemplars > 0 && outOfOrderExemplars == len(exemplars) { // Only report out of order exemplars if all are out of order, otherwise this was a partial update // to some existing set of exemplars. 
appErrs.numExemplarOutOfOrder += outOfOrderExemplars - level.Debug(sl.l).Log("msg", "Out of order exemplars", "count", outOfOrderExemplars, "latest", fmt.Sprintf("%+v", exemplars[len(exemplars)-1])) + sl.l.Debug("Out of order exemplars", "count", outOfOrderExemplars, "latest", fmt.Sprintf("%+v", exemplars[len(exemplars)-1])) sl.metrics.targetScrapeExemplarOutOfOrder.Add(float64(outOfOrderExemplars)) } if sl.appendMetadataToWAL && metadataChanged { if _, merr := app.UpdateMetadata(ref, lset, meta); merr != nil { // No need to fail the scrape on errors appending metadata. - level.Debug(sl.l).Log("msg", "Error when appending metadata in scrape loop", "ref", fmt.Sprintf("%d", ref), "metadata", fmt.Sprintf("%+v", meta), "err", merr) + sl.l.Debug("Error when appending metadata in scrape loop", "ref", fmt.Sprintf("%d", ref), "metadata", fmt.Sprintf("%+v", meta), "err", merr) } } } @@ -1820,16 +1821,16 @@ loop: sl.metrics.targetScrapeNativeHistogramBucketLimit.Inc() } if appErrs.numOutOfOrder > 0 { - level.Warn(sl.l).Log("msg", "Error on ingesting out-of-order samples", "num_dropped", appErrs.numOutOfOrder) + sl.l.Warn("Error on ingesting out-of-order samples", "num_dropped", appErrs.numOutOfOrder) } if appErrs.numDuplicates > 0 { - level.Warn(sl.l).Log("msg", "Error on ingesting samples with different value but same timestamp", "num_dropped", appErrs.numDuplicates) + sl.l.Warn("Error on ingesting samples with different value but same timestamp", "num_dropped", appErrs.numDuplicates) } if appErrs.numOutOfBounds > 0 { - level.Warn(sl.l).Log("msg", "Error on ingesting samples that are too old or are too far into the future", "num_dropped", appErrs.numOutOfBounds) + sl.l.Warn("Error on ingesting samples that are too old or are too far into the future", "num_dropped", appErrs.numOutOfBounds) } if appErrs.numExemplarOutOfOrder > 0 { - level.Warn(sl.l).Log("msg", "Error on ingesting out-of-order exemplars", "num_dropped", appErrs.numExemplarOutOfOrder) + sl.l.Warn("Error on 
ingesting out-of-order exemplars", "num_dropped", appErrs.numExemplarOutOfOrder) } if err == nil { sl.cache.forEachStale(func(lset labels.Labels) bool { @@ -1857,17 +1858,17 @@ func (sl *scrapeLoop) checkAddError(met []byte, err error, sampleLimitErr, bucke return false, storage.ErrNotFound case errors.Is(err, storage.ErrOutOfOrderSample): appErrs.numOutOfOrder++ - level.Debug(sl.l).Log("msg", "Out of order sample", "series", string(met)) + sl.l.Debug("Out of order sample", "series", string(met)) sl.metrics.targetScrapeSampleOutOfOrder.Inc() return false, nil case errors.Is(err, storage.ErrDuplicateSampleForTimestamp): appErrs.numDuplicates++ - level.Debug(sl.l).Log("msg", "Duplicate sample for timestamp", "series", string(met)) + sl.l.Debug("Duplicate sample for timestamp", "series", string(met)) sl.metrics.targetScrapeSampleDuplicate.Inc() return false, nil case errors.Is(err, storage.ErrOutOfBounds): appErrs.numOutOfBounds++ - level.Debug(sl.l).Log("msg", "Out of bounds metric", "series", string(met)) + sl.l.Debug("Out of bounds metric", "series", string(met)) sl.metrics.targetScrapeSampleOutOfBounds.Inc() return false, nil case errors.Is(err, errSampleLimit): diff --git a/scrape/scrape_test.go b/scrape/scrape_test.go index f065ecebbc..bdf85a451c 100644 --- a/scrape/scrape_test.go +++ b/scrape/scrape_test.go @@ -31,7 +31,6 @@ import ( "testing" "time" - "github.com/go-kit/log" "github.com/gogo/protobuf/proto" "github.com/google/go-cmp/cmp" "github.com/grafana/regexp" @@ -40,6 +39,7 @@ import ( dto "github.com/prometheus/client_model/go" config_util "github.com/prometheus/common/config" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "github.com/prometheus/prometheus/config" @@ -54,6 +54,7 @@ import ( "github.com/prometheus/prometheus/model/value" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/chunkenc" + "github.com/prometheus/prometheus/util/logging" 
"github.com/prometheus/prometheus/util/pool" "github.com/prometheus/prometheus/util/teststorage" "github.com/prometheus/prometheus/util/testutil" @@ -159,7 +160,7 @@ type testLoop struct { timeout time.Duration } -func (l *testLoop) setScrapeFailureLogger(log.Logger) { +func (l *testLoop) setScrapeFailureLogger(*logging.JSONFileLogger) { } func (l *testLoop) run(errc chan<- error) { @@ -396,7 +397,7 @@ func TestScrapePoolTargetLimit(t *testing.T) { activeTargets: map[uint64]*Target{}, loops: map[uint64]loop{}, newLoop: newLoop, - logger: log.NewNopLogger(), + logger: promslog.NewNopLogger(), client: http.DefaultClient, metrics: newTestScrapeMetrics(t), symbolTable: labels.NewSymbolTable(), @@ -3061,7 +3062,7 @@ func TestReuseCacheRace(t *testing.T) { func TestCheckAddError(t *testing.T) { var appErrs appendErrors - sl := scrapeLoop{l: log.NewNopLogger(), metrics: newTestScrapeMetrics(t)} + sl := scrapeLoop{l: promslog.NewNopLogger(), metrics: newTestScrapeMetrics(t)} sl.checkAddError(nil, storage.ErrOutOfOrderSample, nil, nil, &appErrs) require.Equal(t, 1, appErrs.numOutOfOrder) } @@ -3834,7 +3835,7 @@ scrape_configs: mng, err := NewManager(&Options{DiscoveryReloadInterval: model.Duration(10 * time.Millisecond), EnableNativeHistogramsIngestion: true}, nil, nil, s, reg) require.NoError(t, err) - cfg, err := config.Load(configStr, false, log.NewNopLogger()) + cfg, err := config.Load(configStr, false, promslog.NewNopLogger()) require.NoError(t, err) mng.ApplyConfig(cfg) tsets := make(chan map[string][]*targetgroup.Group) diff --git a/storage/fanout.go b/storage/fanout.go index 80022b2566..6ff5178955 100644 --- a/storage/fanout.go +++ b/storage/fanout.go @@ -15,9 +15,8 @@ package storage import ( "context" + "log/slog" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/exemplar" @@ -28,7 +27,7 @@ import ( ) type fanout struct { - logger log.Logger + logger *slog.Logger primary Storage 
secondaries []Storage @@ -43,7 +42,7 @@ type fanout struct { // and the error from the secondary querier will be returned as a warning. // // NOTE: In the case of Prometheus, it treats all remote storages as secondary / best effort. -func NewFanout(logger log.Logger, primary Storage, secondaries ...Storage) Storage { +func NewFanout(logger *slog.Logger, primary Storage, secondaries ...Storage) Storage { return &fanout{ logger: logger, primary: primary, @@ -142,7 +141,7 @@ func (f *fanout) Close() error { // fanoutAppender implements Appender. type fanoutAppender struct { - logger log.Logger + logger *slog.Logger primary Appender secondaries []Appender @@ -240,7 +239,7 @@ func (f *fanoutAppender) Commit() (err error) { err = appender.Commit() } else { if rollbackErr := appender.Rollback(); rollbackErr != nil { - level.Error(f.logger).Log("msg", "Squashed rollback error on commit", "err", rollbackErr) + f.logger.Error("Squashed rollback error on commit", "err", rollbackErr) } } } @@ -256,7 +255,7 @@ func (f *fanoutAppender) Rollback() (err error) { case err == nil: err = rollbackErr case rollbackErr != nil: - level.Error(f.logger).Log("msg", "Squashed rollback error on rollback", "err", rollbackErr) + f.logger.Error("Squashed rollback error on rollback", "err", rollbackErr) } } return nil diff --git a/storage/remote/codec_test.go b/storage/remote/codec_test.go index 5b058d84ec..c2fe6186ce 100644 --- a/storage/remote/codec_test.go +++ b/storage/remote/codec_test.go @@ -20,9 +20,9 @@ import ( "sync" "testing" - "github.com/go-kit/log" "github.com/gogo/protobuf/proto" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "github.com/prometheus/prometheus/config" @@ -582,7 +582,7 @@ func TestDecodeWriteRequest(t *testing.T) { } func TestDecodeWriteV2Request(t *testing.T) { - buf, _, _, err := buildV2WriteRequest(log.NewNopLogger(), writeV2RequestFixture.Timeseries, writeV2RequestFixture.Symbols, nil, nil, 
nil, "snappy") + buf, _, _, err := buildV2WriteRequest(promslog.NewNopLogger(), writeV2RequestFixture.Timeseries, writeV2RequestFixture.Symbols, nil, nil, nil, "snappy") require.NoError(t, err) actual, err := DecodeWriteV2Request(bytes.NewReader(buf)) diff --git a/storage/remote/metadata_watcher.go b/storage/remote/metadata_watcher.go index fdcd668f56..9306dcb4c2 100644 --- a/storage/remote/metadata_watcher.go +++ b/storage/remote/metadata_watcher.go @@ -16,11 +16,11 @@ package remote import ( "context" "errors" + "log/slog" "time" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/prometheus/prometheus/scrape" ) @@ -44,7 +44,7 @@ func (noop *noopScrapeManager) Get() (*scrape.Manager, error) { // MetadataWatcher watches the Scrape Manager for a given WriteMetadataTo. type MetadataWatcher struct { name string - logger log.Logger + logger *slog.Logger managerGetter ReadyScrapeManager manager Watchable @@ -62,9 +62,9 @@ type MetadataWatcher struct { } // NewMetadataWatcher builds a new MetadataWatcher. -func NewMetadataWatcher(l log.Logger, mg ReadyScrapeManager, name string, w MetadataAppender, interval model.Duration, deadline time.Duration) *MetadataWatcher { +func NewMetadataWatcher(l *slog.Logger, mg ReadyScrapeManager, name string, w MetadataAppender, interval model.Duration, deadline time.Duration) *MetadataWatcher { if l == nil { - l = log.NewNopLogger() + l = promslog.NewNopLogger() } if mg == nil { @@ -87,7 +87,7 @@ func NewMetadataWatcher(l log.Logger, mg ReadyScrapeManager, name string, w Meta // Start the MetadataWatcher. 
func (mw *MetadataWatcher) Start() { - level.Info(mw.logger).Log("msg", "Starting scraped metadata watcher") + mw.logger.Info("Starting scraped metadata watcher") mw.hardShutdownCtx, mw.hardShutdownCancel = context.WithCancel(context.Background()) mw.softShutdownCtx, mw.softShutdownCancel = context.WithCancel(mw.hardShutdownCtx) go mw.loop() @@ -95,15 +95,15 @@ func (mw *MetadataWatcher) Start() { // Stop the MetadataWatcher. func (mw *MetadataWatcher) Stop() { - level.Info(mw.logger).Log("msg", "Stopping metadata watcher...") - defer level.Info(mw.logger).Log("msg", "Scraped metadata watcher stopped") + mw.logger.Info("Stopping metadata watcher...") + defer mw.logger.Info("Scraped metadata watcher stopped") mw.softShutdownCancel() select { case <-mw.done: return case <-time.After(mw.deadline): - level.Error(mw.logger).Log("msg", "Failed to flush metadata") + mw.logger.Error("Failed to flush metadata") } mw.hardShutdownCancel() diff --git a/storage/remote/otlptranslator/prometheusremotewrite/histograms_test.go b/storage/remote/otlptranslator/prometheusremotewrite/histograms_test.go index bd0e777e8c..5fdd26ef29 100644 --- a/storage/remote/otlptranslator/prometheusremotewrite/histograms_test.go +++ b/storage/remote/otlptranslator/prometheusremotewrite/histograms_test.go @@ -23,12 +23,13 @@ import ( "time" "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/prompb" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/pmetric" + "github.com/prometheus/prometheus/prompb" + prometheustranslator "github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus" ) diff --git a/storage/remote/otlptranslator/prometheusremotewrite/number_data_points_test.go b/storage/remote/otlptranslator/prometheusremotewrite/number_data_points_test.go index e932269644..b01d2cb1fe 100644 --- 
a/storage/remote/otlptranslator/prometheusremotewrite/number_data_points_test.go +++ b/storage/remote/otlptranslator/prometheusremotewrite/number_data_points_test.go @@ -22,10 +22,11 @@ import ( "time" "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/prompb" "github.com/stretchr/testify/assert" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/pmetric" + + "github.com/prometheus/prometheus/prompb" ) func TestPrometheusConverter_addGaugeNumberDataPoints(t *testing.T) { diff --git a/storage/remote/queue_manager.go b/storage/remote/queue_manager.go index 32cb6c498b..9f27c333a6 100644 --- a/storage/remote/queue_manager.go +++ b/storage/remote/queue_manager.go @@ -17,17 +17,17 @@ import ( "context" "errors" "fmt" + "log/slog" "math" "strconv" "sync" "time" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/gogo/protobuf/proto" "github.com/golang/snappy" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" semconv "go.opentelemetry.io/otel/semconv/v1.21.0" @@ -407,7 +407,7 @@ type QueueManager struct { reshardDisableStartTimestamp atomic.Int64 // Time that reshard was disabled. reshardDisableEndTimestamp atomic.Int64 // Time that reshard is disabled until. 
- logger log.Logger + logger *slog.Logger flushDeadline time.Duration cfg config.QueueConfig mcfg config.MetadataConfig @@ -454,7 +454,7 @@ func NewQueueManager( metrics *queueManagerMetrics, watcherMetrics *wlog.WatcherMetrics, readerMetrics *wlog.LiveReaderMetrics, - logger log.Logger, + logger *slog.Logger, dir string, samplesIn *ewmaRate, cfg config.QueueConfig, @@ -471,7 +471,7 @@ func NewQueueManager( protoMsg config.RemoteWriteProtoMsg, ) *QueueManager { if logger == nil { - logger = log.NewNopLogger() + logger = promslog.NewNopLogger() } // Copy externalLabels into a slice, which we need for processExternalLabels. @@ -480,7 +480,7 @@ func NewQueueManager( extLabelsSlice = append(extLabelsSlice, l) }) - logger = log.With(logger, remoteName, client.Name(), endpoint, client.Endpoint()) + logger = logger.With(remoteName, client.Name(), endpoint, client.Endpoint()) t := &QueueManager{ logger: logger, flushDeadline: flushDeadline, @@ -526,7 +526,7 @@ func NewQueueManager( // ships them alongside series. If both mechanisms are set, the new one // takes precedence by implicitly disabling the older one. 
if t.mcfg.Send && t.protoMsg != config.RemoteWriteProtoMsgV1 { - level.Warn(logger).Log("msg", "usage of 'metadata_config.send' is redundant when using remote write v2 (or higher) as metadata will always be gathered from the WAL and included for every series within each write request") + logger.Warn("usage of 'metadata_config.send' is redundant when using remote write v2 (or higher) as metadata will always be gathered from the WAL and included for every series within each write request") t.mcfg.Send = false } @@ -567,7 +567,7 @@ func (t *QueueManager) AppendWatcherMetadata(ctx context.Context, metadata []scr err := t.sendMetadataWithBackoff(ctx, mm[i*t.mcfg.MaxSamplesPerSend:last], pBuf) if err != nil { t.metrics.failedMetadataTotal.Add(float64(last - (i * t.mcfg.MaxSamplesPerSend))) - level.Error(t.logger).Log("msg", "non-recoverable error while sending metadata", "count", last-(i*t.mcfg.MaxSamplesPerSend), "err", err) + t.logger.Error("non-recoverable error while sending metadata", "count", last-(i*t.mcfg.MaxSamplesPerSend), "err", err) } } } @@ -706,7 +706,7 @@ outer: if !ok { t.dataDropped.incr(1) if _, ok := t.droppedSeries[s.Ref]; !ok { - level.Info(t.logger).Log("msg", "Dropped sample for series that was not explicitly dropped via relabelling", "ref", s.Ref) + t.logger.Info("Dropped sample for series that was not explicitly dropped via relabelling", "ref", s.Ref) t.metrics.droppedSamplesTotal.WithLabelValues(reasonUnintentionalDroppedSeries).Inc() } else { t.metrics.droppedSamplesTotal.WithLabelValues(reasonDroppedSeries).Inc() @@ -769,7 +769,7 @@ outer: // Track dropped exemplars in the same EWMA for sharding calc. 
t.dataDropped.incr(1) if _, ok := t.droppedSeries[e.Ref]; !ok { - level.Info(t.logger).Log("msg", "Dropped exemplar for series that was not explicitly dropped via relabelling", "ref", e.Ref) + t.logger.Info("Dropped exemplar for series that was not explicitly dropped via relabelling", "ref", e.Ref) t.metrics.droppedExemplarsTotal.WithLabelValues(reasonUnintentionalDroppedSeries).Inc() } else { t.metrics.droppedExemplarsTotal.WithLabelValues(reasonDroppedSeries).Inc() @@ -825,7 +825,7 @@ outer: if !ok { t.dataDropped.incr(1) if _, ok := t.droppedSeries[h.Ref]; !ok { - level.Info(t.logger).Log("msg", "Dropped histogram for series that was not explicitly dropped via relabelling", "ref", h.Ref) + t.logger.Info("Dropped histogram for series that was not explicitly dropped via relabelling", "ref", h.Ref) t.metrics.droppedHistogramsTotal.WithLabelValues(reasonUnintentionalDroppedSeries).Inc() } else { t.metrics.droppedHistogramsTotal.WithLabelValues(reasonDroppedSeries).Inc() @@ -880,7 +880,7 @@ outer: if !ok { t.dataDropped.incr(1) if _, ok := t.droppedSeries[h.Ref]; !ok { - level.Info(t.logger).Log("msg", "Dropped histogram for series that was not explicitly dropped via relabelling", "ref", h.Ref) + t.logger.Info("Dropped histogram for series that was not explicitly dropped via relabelling", "ref", h.Ref) t.metrics.droppedHistogramsTotal.WithLabelValues(reasonUnintentionalDroppedSeries).Inc() } else { t.metrics.droppedHistogramsTotal.WithLabelValues(reasonDroppedSeries).Inc() @@ -944,8 +944,8 @@ func (t *QueueManager) Start() { // Stop stops sending samples to the remote storage and waits for pending // sends to complete. 
func (t *QueueManager) Stop() { - level.Info(t.logger).Log("msg", "Stopping remote storage...") - defer level.Info(t.logger).Log("msg", "Remote storage stopped.") + t.logger.Info("Stopping remote storage...") + defer t.logger.Info("Remote storage stopped.") close(t.quit) t.wg.Wait() @@ -1093,10 +1093,10 @@ func (t *QueueManager) updateShardsLoop() { // to stay close to shardUpdateDuration. select { case t.reshardChan <- desiredShards: - level.Info(t.logger).Log("msg", "Remote storage resharding", "from", t.numShards, "to", desiredShards) + t.logger.Info("Remote storage resharding", "from", t.numShards, "to", desiredShards) t.numShards = desiredShards default: - level.Info(t.logger).Log("msg", "Currently resharding, skipping.") + t.logger.Info("Currently resharding, skipping.") } case <-t.quit: return @@ -1114,14 +1114,14 @@ func (t *QueueManager) shouldReshard(desiredShards int) bool { minSendTimestamp := time.Now().Add(-1 * shardUpdateDuration).Unix() lsts := t.lastSendTimestamp.Load() if lsts < minSendTimestamp { - level.Warn(t.logger).Log("msg", "Skipping resharding, last successful send was beyond threshold", "lastSendTimestamp", lsts, "minSendTimestamp", minSendTimestamp) + t.logger.Warn("Skipping resharding, last successful send was beyond threshold", "lastSendTimestamp", lsts, "minSendTimestamp", minSendTimestamp) return false } if disableTimestamp := t.reshardDisableEndTimestamp.Load(); time.Now().Unix() < disableTimestamp { disabledAt := time.Unix(t.reshardDisableStartTimestamp.Load(), 0) disabledFor := time.Until(time.Unix(disableTimestamp, 0)) - level.Warn(t.logger).Log("msg", "Skipping resharding, resharding is disabled while waiting for recoverable errors", "disabled_at", disabledAt, "disabled_for", disabledFor) + t.logger.Warn("Skipping resharding, resharding is disabled while waiting for recoverable errors", "disabled_at", disabledAt, "disabled_for", disabledFor) return false } return true @@ -1164,7 +1164,7 @@ func (t *QueueManager) 
calculateDesiredShards() int { desiredShards = timePerSample * (dataInRate*dataKeptRatio + backlogCatchup) ) t.metrics.desiredNumShards.Set(desiredShards) - level.Debug(t.logger).Log("msg", "QueueManager.calculateDesiredShards", + t.logger.Debug("QueueManager.calculateDesiredShards", "dataInRate", dataInRate, "dataOutRate", dataOutRate, "dataKeptRatio", dataKeptRatio, @@ -1182,7 +1182,7 @@ func (t *QueueManager) calculateDesiredShards() int { lowerBound = float64(t.numShards) * (1. - shardToleranceFraction) upperBound = float64(t.numShards) * (1. + shardToleranceFraction) ) - level.Debug(t.logger).Log("msg", "QueueManager.updateShardsLoop", + t.logger.Debug("QueueManager.updateShardsLoop", "lowerBound", lowerBound, "desiredShards", desiredShards, "upperBound", upperBound) desiredShards = math.Ceil(desiredShards) // Round up to be on the safe side. @@ -1193,7 +1193,7 @@ func (t *QueueManager) calculateDesiredShards() int { numShards := int(desiredShards) // Do not downshard if we are more than ten seconds back. if numShards < t.numShards && delay > 10.0 { - level.Debug(t.logger).Log("msg", "Not downsharding due to being too far behind") + t.logger.Debug("Not downsharding due to being too far behind") return t.numShards } @@ -1321,7 +1321,7 @@ func (s *shards) stop() { // Log error for any dropped samples, exemplars, or histograms. 
logDroppedError := func(t string, counter atomic.Uint32) { if dropped := counter.Load(); dropped > 0 { - level.Error(s.qm.logger).Log("msg", fmt.Sprintf("Failed to flush all %s on shutdown", t), "count", dropped) + s.qm.logger.Error(fmt.Sprintf("Failed to flush all %s on shutdown", t), "count", dropped) } } logDroppedError("samples", s.samplesDroppedOnHardShutdown) @@ -1564,7 +1564,7 @@ func (s *shards) runShard(ctx context.Context, shardID int, queue *queue) { nPendingSamples, nPendingExemplars, nPendingHistograms := populateTimeSeries(batch, pendingData, s.qm.sendExemplars, s.qm.sendNativeHistograms) n := nPendingSamples + nPendingExemplars + nPendingHistograms if timer { - level.Debug(s.qm.logger).Log("msg", "runShard timer ticked, sending buffered data", "samples", nPendingSamples, + s.qm.logger.Debug("runShard timer ticked, sending buffered data", "samples", nPendingSamples, "exemplars", nPendingExemplars, "shard", shardNum, "histograms", nPendingHistograms) } _ = s.sendSamples(ctx, pendingData[:n], nPendingSamples, nPendingExemplars, nPendingHistograms, pBuf, &buf, enc) @@ -1691,9 +1691,9 @@ func (s *shards) updateMetrics(_ context.Context, err error, sampleCount, exempl s.qm.metrics.failedExemplarsTotal.Add(float64(exemplarDiff)) } if err != nil { - level.Error(s.qm.logger).Log("msg", "non-recoverable error", "failedSampleCount", sampleDiff, "failedHistogramCount", histogramDiff, "failedExemplarCount", exemplarDiff, "err", err) + s.qm.logger.Error("non-recoverable error", "failedSampleCount", sampleDiff, "failedHistogramCount", histogramDiff, "failedExemplarCount", exemplarDiff, "err", err) } else if sampleDiff+exemplarDiff+histogramDiff > 0 { - level.Error(s.qm.logger).Log("msg", "we got 2xx status code from the Receiver yet statistics indicate some dat was not written; investigation needed", "failedSampleCount", sampleDiff, "failedHistogramCount", histogramDiff, "failedExemplarCount", exemplarDiff) + s.qm.logger.Error("we got 2xx status code from the 
Receiver yet statistics indicate some dat was not written; investigation needed", "failedSampleCount", sampleDiff, "failedHistogramCount", histogramDiff, "failedExemplarCount", exemplarDiff) } // These counters are used to calculate the dynamic sharding, and as such @@ -2018,9 +2018,9 @@ func (t *QueueManager) sendWriteRequestWithBackoff(ctx context.Context, attempt switch { case backoffErr.retryAfter > 0: sleepDuration = backoffErr.retryAfter - level.Info(t.logger).Log("msg", "Retrying after duration specified by Retry-After header", "duration", sleepDuration) + t.logger.Info("Retrying after duration specified by Retry-After header", "duration", sleepDuration) case backoffErr.retryAfter < 0: - level.Debug(t.logger).Log("msg", "retry-after cannot be in past, retrying using default backoff mechanism") + t.logger.Debug("retry-after cannot be in past, retrying using default backoff mechanism") } // We should never reshard for a recoverable error; increasing shards could @@ -2047,7 +2047,7 @@ func (t *QueueManager) sendWriteRequestWithBackoff(ctx context.Context, attempt // If we make it this far, we've encountered a recoverable error and will retry. 
onRetry() - level.Warn(t.logger).Log("msg", "Failed to send batch, retrying", "err", err) + t.logger.Warn("Failed to send batch, retrying", "err", err) backoff = sleepDuration * 2 @@ -2147,12 +2147,12 @@ func compressPayload(tmpbuf *[]byte, inp []byte, enc Compression) (compressed [] } } -func buildWriteRequest(logger log.Logger, timeSeries []prompb.TimeSeries, metadata []prompb.MetricMetadata, pBuf *proto.Buffer, buf *[]byte, filter func(prompb.TimeSeries) bool, enc Compression) (compressed []byte, highest, lowest int64, _ error) { +func buildWriteRequest(logger *slog.Logger, timeSeries []prompb.TimeSeries, metadata []prompb.MetricMetadata, pBuf *proto.Buffer, buf *[]byte, filter func(prompb.TimeSeries) bool, enc Compression) (compressed []byte, highest, lowest int64, _ error) { highest, lowest, timeSeries, droppedSamples, droppedExemplars, droppedHistograms := buildTimeSeries(timeSeries, filter) if droppedSamples > 0 || droppedExemplars > 0 || droppedHistograms > 0 { - level.Debug(logger).Log("msg", "dropped data due to their age", "droppedSamples", droppedSamples, "droppedExemplars", droppedExemplars, "droppedHistograms", droppedHistograms) + logger.Debug("dropped data due to their age", "droppedSamples", droppedSamples, "droppedExemplars", droppedExemplars, "droppedHistograms", droppedHistograms) } req := &prompb.WriteRequest{ @@ -2185,11 +2185,11 @@ func buildWriteRequest(logger log.Logger, timeSeries []prompb.TimeSeries, metada return compressed, highest, lowest, nil } -func buildV2WriteRequest(logger log.Logger, samples []writev2.TimeSeries, labels []string, pBuf, buf *[]byte, filter func(writev2.TimeSeries) bool, enc Compression) (compressed []byte, highest, lowest int64, _ error) { +func buildV2WriteRequest(logger *slog.Logger, samples []writev2.TimeSeries, labels []string, pBuf, buf *[]byte, filter func(writev2.TimeSeries) bool, enc Compression) (compressed []byte, highest, lowest int64, _ error) { highest, lowest, timeSeries, droppedSamples, 
droppedExemplars, droppedHistograms := buildV2TimeSeries(samples, filter) if droppedSamples > 0 || droppedExemplars > 0 || droppedHistograms > 0 { - level.Debug(logger).Log("msg", "dropped data due to their age", "droppedSamples", droppedSamples, "droppedExemplars", droppedExemplars, "droppedHistograms", droppedHistograms) + logger.Debug("dropped data due to their age", "droppedSamples", droppedSamples, "droppedExemplars", droppedExemplars, "droppedHistograms", droppedHistograms) } req := &writev2.Request{ diff --git a/storage/remote/queue_manager_test.go b/storage/remote/queue_manager_test.go index 99fd023066..4b7c5a4e90 100644 --- a/storage/remote/queue_manager_test.go +++ b/storage/remote/queue_manager_test.go @@ -28,13 +28,13 @@ import ( "testing" "time" - "github.com/go-kit/log" "github.com/gogo/protobuf/proto" "github.com/golang/snappy" "github.com/google/go-cmp/cmp" "github.com/prometheus/client_golang/prometheus" client_testutil "github.com/prometheus/client_golang/prometheus/testutil" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "go.uber.org/atomic" @@ -1418,8 +1418,7 @@ func BenchmarkStartup(b *testing.B) { } sort.Ints(segments) - logger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stdout)) - logger = log.With(logger, "caller", log.DefaultCaller) + logger := promslog.New(&promslog.Config{}) cfg := testDefaultQueueConfig() mcfg := config.DefaultMetadataConfig @@ -1853,7 +1852,7 @@ func createDummyTimeSeries(instances int) []timeSeries { } func BenchmarkBuildWriteRequest(b *testing.B) { - noopLogger := log.NewNopLogger() + noopLogger := promslog.NewNopLogger() bench := func(b *testing.B, batch []timeSeries) { buff := make([]byte, 0) seriesBuff := make([]prompb.TimeSeries, len(batch)) @@ -1893,7 +1892,7 @@ func BenchmarkBuildWriteRequest(b *testing.B) { } func BenchmarkBuildV2WriteRequest(b *testing.B) { - noopLogger := log.NewNopLogger() + noopLogger := promslog.NewNopLogger() bench := 
func(b *testing.B, batch []timeSeries) { symbolTable := writev2.NewSymbolTable() buff := make([]byte, 0) diff --git a/storage/remote/read_handler.go b/storage/remote/read_handler.go index ffc64c9c3f..8f2945f974 100644 --- a/storage/remote/read_handler.go +++ b/storage/remote/read_handler.go @@ -16,13 +16,12 @@ package remote import ( "context" "errors" + "log/slog" "net/http" "slices" "strings" "sync" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/prometheus/config" @@ -34,7 +33,7 @@ import ( ) type readHandler struct { - logger log.Logger + logger *slog.Logger queryable storage.SampleAndChunkQueryable config func() config.Config remoteReadSampleLimit int @@ -46,7 +45,7 @@ type readHandler struct { // NewReadHandler creates a http.Handler that accepts remote read requests and // writes them to the provided queryable. -func NewReadHandler(logger log.Logger, r prometheus.Registerer, queryable storage.SampleAndChunkQueryable, config func() config.Config, remoteReadSampleLimit, remoteReadConcurrencyLimit, remoteReadMaxBytesInFrame int) http.Handler { +func NewReadHandler(logger *slog.Logger, r prometheus.Registerer, queryable storage.SampleAndChunkQueryable, config func() config.Config, remoteReadSampleLimit, remoteReadConcurrencyLimit, remoteReadMaxBytesInFrame int) http.Handler { h := &readHandler{ logger: logger, queryable: queryable, @@ -140,7 +139,7 @@ func (h *readHandler) remoteReadSamples( } defer func() { if err := querier.Close(); err != nil { - level.Warn(h.logger).Log("msg", "Error on querier close", "err", err.Error()) + h.logger.Warn("Error on querier close", "err", err.Error()) } }() @@ -163,7 +162,7 @@ func (h *readHandler) remoteReadSamples( return err } for _, w := range ws { - level.Warn(h.logger).Log("msg", "Warnings on remote read query", "err", w.Error()) + h.logger.Warn("Warnings on remote read query", "err", w.Error()) } for _, ts := range 
resp.Results[i].Timeseries { ts.Labels = MergeLabels(ts.Labels, sortedExternalLabels) @@ -208,7 +207,7 @@ func (h *readHandler) remoteReadStreamedXORChunks(ctx context.Context, w http.Re } defer func() { if err := querier.Close(); err != nil { - level.Warn(h.logger).Log("msg", "Error on chunk querier close", "err", err.Error()) + h.logger.Warn("Error on chunk querier close", "err", err.Error()) } }() @@ -239,7 +238,7 @@ func (h *readHandler) remoteReadStreamedXORChunks(ctx context.Context, w http.Re } for _, w := range ws { - level.Warn(h.logger).Log("msg", "Warnings on chunked remote read query", "warnings", w.Error()) + h.logger.Warn("Warnings on chunked remote read query", "warnings", w.Error()) } return nil }(); err != nil { diff --git a/storage/remote/storage.go b/storage/remote/storage.go index 05634f1798..14c3c87d93 100644 --- a/storage/remote/storage.go +++ b/storage/remote/storage.go @@ -18,12 +18,13 @@ import ( "crypto/md5" "encoding/hex" "fmt" + "log/slog" "sync" "time" - "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "gopkg.in/yaml.v2" "github.com/prometheus/prometheus/config" @@ -51,8 +52,9 @@ type startTimeCallback func() (int64, error) // Storage represents all the remote read and write endpoints. It implements // storage.Storage. type Storage struct { - logger *logging.Deduper - mtx sync.Mutex + deduper *logging.Deduper + logger *slog.Logger + mtx sync.Mutex rws *WriteStorage @@ -62,14 +64,16 @@ type Storage struct { } // NewStorage returns a remote.Storage. 
-func NewStorage(l log.Logger, reg prometheus.Registerer, stCallback startTimeCallback, walDir string, flushDeadline time.Duration, sm ReadyScrapeManager, metadataInWAL bool) *Storage { +func NewStorage(l *slog.Logger, reg prometheus.Registerer, stCallback startTimeCallback, walDir string, flushDeadline time.Duration, sm ReadyScrapeManager, metadataInWAL bool) *Storage { if l == nil { - l = log.NewNopLogger() + l = promslog.NewNopLogger() } - logger := logging.Dedupe(l, 1*time.Minute) + deduper := logging.Dedupe(l, 1*time.Minute) + logger := slog.New(deduper) s := &Storage{ logger: logger, + deduper: deduper, localStartTimeCallback: stCallback, } s.rws = NewWriteStorage(s.logger, reg, walDir, flushDeadline, sm, metadataInWAL) @@ -196,7 +200,7 @@ func (s *Storage) LowestSentTimestamp() int64 { // Close the background processing of the storage queues. func (s *Storage) Close() error { - s.logger.Stop() + s.deduper.Stop() s.mtx.Lock() defer s.mtx.Unlock() return s.rws.Close() diff --git a/storage/remote/write.go b/storage/remote/write.go index 624732c4fe..20e4ed10d1 100644 --- a/storage/remote/write.go +++ b/storage/remote/write.go @@ -17,13 +17,14 @@ import ( "context" "errors" "fmt" + "log/slog" "math" "sync" "time" - "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" + "github.com/prometheus/common/promslog" "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/model/exemplar" @@ -57,7 +58,7 @@ var ( // WriteStorage represents all the remote write storage. type WriteStorage struct { - logger log.Logger + logger *slog.Logger reg prometheus.Registerer mtx sync.Mutex @@ -78,9 +79,9 @@ type WriteStorage struct { } // NewWriteStorage creates and runs a WriteStorage. 
-func NewWriteStorage(logger log.Logger, reg prometheus.Registerer, dir string, flushDeadline time.Duration, sm ReadyScrapeManager, metadataInWal bool) *WriteStorage { +func NewWriteStorage(logger *slog.Logger, reg prometheus.Registerer, dir string, flushDeadline time.Duration, sm ReadyScrapeManager, metadataInWal bool) *WriteStorage { if logger == nil { - logger = log.NewNopLogger() + logger = promslog.NewNopLogger() } rws := &WriteStorage{ queues: make(map[string]*QueueManager), diff --git a/storage/remote/write_handler.go b/storage/remote/write_handler.go index 736bc8eff3..466673c99d 100644 --- a/storage/remote/write_handler.go +++ b/storage/remote/write_handler.go @@ -18,12 +18,11 @@ import ( "errors" "fmt" "io" + "log/slog" "net/http" "strings" "time" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/gogo/protobuf/proto" "github.com/golang/snappy" "github.com/prometheus/client_golang/prometheus" @@ -42,7 +41,7 @@ import ( ) type writeHandler struct { - logger log.Logger + logger *slog.Logger appendable storage.Appendable samplesWithInvalidLabelsTotal prometheus.Counter @@ -58,7 +57,7 @@ const maxAheadTime = 10 * time.Minute // // NOTE(bwplotka): When accepting v2 proto and spec, partial writes are possible // as per https://prometheus.io/docs/specs/remote_write_spec_2_0/#partial-write. 
-func NewWriteHandler(logger log.Logger, reg prometheus.Registerer, appendable storage.Appendable, acceptedProtoMsgs []config.RemoteWriteProtoMsg) http.Handler { +func NewWriteHandler(logger *slog.Logger, reg prometheus.Registerer, appendable storage.Appendable, acceptedProtoMsgs []config.RemoteWriteProtoMsg) http.Handler { protoMsgs := map[config.RemoteWriteProtoMsg]struct{}{} for _, acc := range acceptedProtoMsgs { protoMsgs[acc] = struct{}{} @@ -119,7 +118,7 @@ func (h *writeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { msgType, err := h.parseProtoMsg(contentType) if err != nil { - level.Error(h.logger).Log("msg", "Error decoding remote write request", "err", err) + h.logger.Error("Error decoding remote write request", "err", err) http.Error(w, err.Error(), http.StatusUnsupportedMediaType) return } @@ -131,7 +130,7 @@ func (h *writeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { } return ret }()) - level.Error(h.logger).Log("msg", "Error decoding remote write request", "err", err) + h.logger.Error("Error decoding remote write request", "err", err) http.Error(w, err.Error(), http.StatusUnsupportedMediaType) } @@ -142,14 +141,14 @@ func (h *writeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { // We could give http.StatusUnsupportedMediaType, but let's assume snappy by default. } else if enc != string(SnappyBlockCompression) { err := fmt.Errorf("%v encoding (compression) is not accepted by this server; only %v is acceptable", enc, SnappyBlockCompression) - level.Error(h.logger).Log("msg", "Error decoding remote write request", "err", err) + h.logger.Error("Error decoding remote write request", "err", err) http.Error(w, err.Error(), http.StatusUnsupportedMediaType) } // Read the request body. 
body, err := io.ReadAll(r.Body) if err != nil { - level.Error(h.logger).Log("msg", "Error decoding remote write request", "err", err.Error()) + h.logger.Error("Error decoding remote write request", "err", err.Error()) http.Error(w, err.Error(), http.StatusBadRequest) return } @@ -157,7 +156,7 @@ func (h *writeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { decompressed, err := snappy.Decode(nil, body) if err != nil { // TODO(bwplotka): Add more context to responded error? - level.Error(h.logger).Log("msg", "Error decompressing remote write request", "err", err.Error()) + h.logger.Error("Error decompressing remote write request", "err", err.Error()) http.Error(w, err.Error(), http.StatusBadRequest) return } @@ -169,7 +168,7 @@ func (h *writeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { var req prompb.WriteRequest if err := proto.Unmarshal(decompressed, &req); err != nil { // TODO(bwplotka): Add more context to responded error? - level.Error(h.logger).Log("msg", "Error decoding v1 remote write request", "protobuf_message", msgType, "err", err.Error()) + h.logger.Error("Error decoding v1 remote write request", "protobuf_message", msgType, "err", err.Error()) http.Error(w, err.Error(), http.StatusBadRequest) return } @@ -180,7 +179,7 @@ func (h *writeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { http.Error(w, err.Error(), http.StatusBadRequest) return default: - level.Error(h.logger).Log("msg", "Error while remote writing the v1 request", "err", err.Error()) + h.logger.Error("Error while remote writing the v1 request", "err", err.Error()) http.Error(w, err.Error(), http.StatusInternalServerError) return } @@ -193,7 +192,7 @@ func (h *writeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { var req writev2.Request if err := proto.Unmarshal(decompressed, &req); err != nil { // TODO(bwplotka): Add more context to responded error? 
- level.Error(h.logger).Log("msg", "Error decoding v2 remote write request", "protobuf_message", msgType, "err", err.Error()) + h.logger.Error("Error decoding v2 remote write request", "protobuf_message", msgType, "err", err.Error()) http.Error(w, err.Error(), http.StatusBadRequest) return } @@ -205,7 +204,7 @@ func (h *writeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { if err != nil { if errHTTPCode/5 == 100 { // 5xx - level.Error(h.logger).Log("msg", "Error while remote writing the v2 request", "err", err.Error()) + h.logger.Error("Error while remote writing the v2 request", "err", err.Error()) } http.Error(w, err.Error(), errHTTPCode) return @@ -241,11 +240,11 @@ func (h *writeHandler) write(ctx context.Context, req *prompb.WriteRequest) (err // TODO(bwplotka): Even as per 1.0 spec, this should be a 400 error, while other samples are // potentially written. Perhaps unify with fixed writeV2 implementation a bit. if !ls.Has(labels.MetricName) || !ls.IsValid(model.NameValidationScheme) { - level.Warn(h.logger).Log("msg", "Invalid metric names or labels", "got", ls.String()) + h.logger.Warn("Invalid metric names or labels", "got", ls.String()) samplesWithInvalidLabels++ continue } else if duplicateLabel, hasDuplicate := ls.HasDuplicateLabelNames(); hasDuplicate { - level.Warn(h.logger).Log("msg", "Invalid labels for series.", "labels", ls.String(), "duplicated_label", duplicateLabel) + h.logger.Warn("Invalid labels for series.", "labels", ls.String(), "duplicated_label", duplicateLabel) samplesWithInvalidLabels++ continue } @@ -261,10 +260,10 @@ func (h *writeHandler) write(ctx context.Context, req *prompb.WriteRequest) (err switch { case errors.Is(err, storage.ErrOutOfOrderExemplar): outOfOrderExemplarErrs++ - level.Debug(h.logger).Log("msg", "Out of order exemplar", "series", ls.String(), "exemplar", fmt.Sprintf("%+v", e)) + h.logger.Debug("Out of order exemplar", "series", ls.String(), "exemplar", fmt.Sprintf("%+v", e)) default: // Since exemplar 
storage is still experimental, we don't fail the request on ingestion errors - level.Debug(h.logger).Log("msg", "Error while adding exemplar in AppendExemplar", "series", ls.String(), "exemplar", fmt.Sprintf("%+v", e), "err", err) + h.logger.Debug("Error while adding exemplar in AppendExemplar", "series", ls.String(), "exemplar", fmt.Sprintf("%+v", e), "err", err) } } } @@ -276,7 +275,7 @@ func (h *writeHandler) write(ctx context.Context, req *prompb.WriteRequest) (err } if outOfOrderExemplarErrs > 0 { - _ = level.Warn(h.logger).Log("msg", "Error on ingesting out-of-order exemplars", "num_dropped", outOfOrderExemplarErrs) + h.logger.Warn("Error on ingesting out-of-order exemplars", "num_dropped", outOfOrderExemplarErrs) } if samplesWithInvalidLabels > 0 { h.samplesWithInvalidLabelsTotal.Add(float64(samplesWithInvalidLabels)) @@ -293,7 +292,7 @@ func (h *writeHandler) appendV1Samples(app storage.Appender, ss []prompb.Sample, if errors.Is(err, storage.ErrOutOfOrderSample) || errors.Is(err, storage.ErrOutOfBounds) || errors.Is(err, storage.ErrDuplicateSampleForTimestamp) { - level.Error(h.logger).Log("msg", "Out of order sample from remote write", "err", err.Error(), "series", labels.String(), "timestamp", s.Timestamp) + h.logger.Error("Out of order sample from remote write", "err", err.Error(), "series", labels.String(), "timestamp", s.Timestamp) } return err } @@ -315,7 +314,7 @@ func (h *writeHandler) appendV1Histograms(app storage.Appender, hh []prompb.Hist if errors.Is(err, storage.ErrOutOfOrderSample) || errors.Is(err, storage.ErrOutOfBounds) || errors.Is(err, storage.ErrDuplicateSampleForTimestamp) { - level.Error(h.logger).Log("msg", "Out of order histogram from remote write", "err", err.Error(), "series", labels.String(), "timestamp", hp.Timestamp) + h.logger.Error("Out of order histogram from remote write", "err", err.Error(), "series", labels.String(), "timestamp", hp.Timestamp) } return err } @@ -345,7 +344,7 @@ func (h *writeHandler) writeV2(ctx 
context.Context, req *writev2.Request) (_ Wri // On 5xx, we always rollback, because we expect // sender to retry and TSDB is not idempotent. if rerr := app.Rollback(); rerr != nil { - level.Error(h.logger).Log("msg", "writev2 rollback failed on retry-able error", "err", rerr) + h.logger.Error("writev2 rollback failed on retry-able error", "err", rerr) } return WriteResponseStats{}, errHTTPCode, err } @@ -407,7 +406,7 @@ func (h *writeHandler) appendV2(app storage.Appender, req *writev2.Request, rs * errors.Is(err, storage.ErrDuplicateSampleForTimestamp) || errors.Is(err, storage.ErrTooOldSample) { // TODO(bwplotka): Not too spammy log? - level.Error(h.logger).Log("msg", "Out of order sample from remote write", "err", err.Error(), "series", ls.String(), "timestamp", s.Timestamp) + h.logger.Error("Out of order sample from remote write", "err", err.Error(), "series", ls.String(), "timestamp", s.Timestamp) badRequestErrs = append(badRequestErrs, fmt.Errorf("%w for series %v", err, ls.String())) continue } @@ -432,7 +431,7 @@ func (h *writeHandler) appendV2(app storage.Appender, req *writev2.Request, rs * errors.Is(err, storage.ErrOutOfBounds) || errors.Is(err, storage.ErrDuplicateSampleForTimestamp) { // TODO(bwplotka): Not too spammy log? - level.Error(h.logger).Log("msg", "Out of order histogram from remote write", "err", err.Error(), "series", ls.String(), "timestamp", hp.Timestamp) + h.logger.Error("Out of order histogram from remote write", "err", err.Error(), "series", ls.String(), "timestamp", hp.Timestamp) badRequestErrs = append(badRequestErrs, fmt.Errorf("%w for series %v", err, ls.String())) continue } @@ -450,18 +449,18 @@ func (h *writeHandler) appendV2(app storage.Appender, req *writev2.Request, rs * // Handle append error. if errors.Is(err, storage.ErrOutOfOrderExemplar) { outOfOrderExemplarErrs++ // Maintain old metrics, but technically not needed, given we fail here. 
- level.Error(h.logger).Log("msg", "Out of order exemplar", "err", err.Error(), "series", ls.String(), "exemplar", fmt.Sprintf("%+v", e)) + h.logger.Error("Out of order exemplar", "err", err.Error(), "series", ls.String(), "exemplar", fmt.Sprintf("%+v", e)) badRequestErrs = append(badRequestErrs, fmt.Errorf("%w for series %v", err, ls.String())) continue } // TODO(bwplotka): Add strict mode which would trigger rollback of everything if needed. // For now we keep the previously released flow (just error not debug leve) of dropping them without rollback and 5xx. - level.Error(h.logger).Log("msg", "failed to ingest exemplar, emitting error log, but no error for PRW caller", "err", err.Error(), "series", ls.String(), "exemplar", fmt.Sprintf("%+v", e)) + h.logger.Error("failed to ingest exemplar, emitting error log, but no error for PRW caller", "err", err.Error(), "series", ls.String(), "exemplar", fmt.Sprintf("%+v", e)) } m := ts.ToMetadata(req.Symbols) if _, err = app.UpdateMetadata(ref, ls, m); err != nil { - level.Debug(h.logger).Log("msg", "error while updating metadata from remote write", "err", err) + h.logger.Debug("error while updating metadata from remote write", "err", err) // Metadata is attached to each series, so since Prometheus does not reject sample without metadata information, // we don't report remote write error either. We increment metric instead. 
samplesWithoutMetadata += rs.AllSamples() - allSamplesSoFar @@ -469,7 +468,7 @@ func (h *writeHandler) appendV2(app storage.Appender, req *writev2.Request, rs * } if outOfOrderExemplarErrs > 0 { - level.Warn(h.logger).Log("msg", "Error on ingesting out-of-order exemplars", "num_dropped", outOfOrderExemplarErrs) + h.logger.Warn("Error on ingesting out-of-order exemplars", "num_dropped", outOfOrderExemplarErrs) } h.samplesWithInvalidLabelsTotal.Add(float64(samplesWithInvalidLabels)) @@ -482,7 +481,7 @@ func (h *writeHandler) appendV2(app storage.Appender, req *writev2.Request, rs * // NewOTLPWriteHandler creates a http.Handler that accepts OTLP write requests and // writes them to the provided appendable. -func NewOTLPWriteHandler(logger log.Logger, appendable storage.Appendable, configFunc func() config.Config) http.Handler { +func NewOTLPWriteHandler(logger *slog.Logger, appendable storage.Appendable, configFunc func() config.Config) http.Handler { rwHandler := &writeHandler{ logger: logger, appendable: appendable, @@ -496,7 +495,7 @@ func NewOTLPWriteHandler(logger log.Logger, appendable storage.Appendable, confi } type otlpWriteHandler struct { - logger log.Logger + logger *slog.Logger rwHandler *writeHandler configFunc func() config.Config } @@ -504,7 +503,7 @@ type otlpWriteHandler struct { func (h *otlpWriteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { req, err := DecodeOTLPWriteRequest(r) if err != nil { - level.Error(h.logger).Log("msg", "Error decoding remote write request", "err", err.Error()) + h.logger.Error("Error decoding remote write request", "err", err.Error()) http.Error(w, err.Error(), http.StatusBadRequest) return } @@ -517,11 +516,11 @@ func (h *otlpWriteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { PromoteResourceAttributes: otlpCfg.PromoteResourceAttributes, }) if err != nil { - level.Warn(h.logger).Log("msg", "Error translating OTLP metrics to Prometheus write request", "err", err) + h.logger.Warn("Error 
translating OTLP metrics to Prometheus write request", "err", err) } ws, _ := annots.AsStrings("", 0, 0) if len(ws) > 0 { - level.Warn(h.logger).Log("msg", "Warnings translating OTLP metrics to Prometheus write request", "warnings", ws) + h.logger.Warn("Warnings translating OTLP metrics to Prometheus write request", "warnings", ws) } err = h.rwHandler.write(r.Context(), &prompb.WriteRequest{ @@ -535,7 +534,7 @@ func (h *otlpWriteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { http.Error(w, err.Error(), http.StatusBadRequest) return default: - level.Error(h.logger).Log("msg", "Error appending remote write", "err", err.Error()) + h.logger.Error("Error appending remote write", "err", err.Error()) http.Error(w, err.Error(), http.StatusInternalServerError) return } diff --git a/storage/remote/write_handler_test.go b/storage/remote/write_handler_test.go index 8e628f40de..d91949131b 100644 --- a/storage/remote/write_handler_test.go +++ b/storage/remote/write_handler_test.go @@ -27,11 +27,12 @@ import ( "testing" "time" - "github.com/go-kit/log" "github.com/gogo/protobuf/proto" "github.com/google/go-cmp/cmp" "github.com/stretchr/testify/require" + "github.com/prometheus/common/promslog" + "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/model/histogram" @@ -129,7 +130,7 @@ func TestRemoteWriteHandlerHeadersHandling_V1Message(t *testing.T) { } appendable := &mockAppendable{} - handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1}) + handler := NewWriteHandler(promslog.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1}) recorder := httptest.NewRecorder() handler.ServeHTTP(recorder, req) @@ -144,7 +145,7 @@ func TestRemoteWriteHandlerHeadersHandling_V1Message(t *testing.T) { } func TestRemoteWriteHandlerHeadersHandling_V2Message(t *testing.T) { - payload, _, _, err := 
buildV2WriteRequest(log.NewNopLogger(), writeV2RequestFixture.Timeseries, writeV2RequestFixture.Symbols, nil, nil, nil, "snappy") + payload, _, _, err := buildV2WriteRequest(promslog.NewNopLogger(), writeV2RequestFixture.Timeseries, writeV2RequestFixture.Symbols, nil, nil, nil, "snappy") require.NoError(t, err) for _, tc := range []struct { @@ -230,7 +231,7 @@ func TestRemoteWriteHandlerHeadersHandling_V2Message(t *testing.T) { } appendable := &mockAppendable{} - handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV2}) + handler := NewWriteHandler(promslog.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV2}) recorder := httptest.NewRecorder() handler.ServeHTTP(recorder, req) @@ -255,7 +256,7 @@ func TestRemoteWriteHandler_V1Message(t *testing.T) { // in Prometheus, so keeping like this to not break existing 1.0 clients. appendable := &mockAppendable{} - handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1}) + handler := NewWriteHandler(promslog.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1}) recorder := httptest.NewRecorder() handler.ServeHTTP(recorder, req) @@ -428,7 +429,7 @@ func TestRemoteWriteHandler_V2Message(t *testing.T) { }, } { t.Run(tc.desc, func(t *testing.T) { - payload, _, _, err := buildV2WriteRequest(log.NewNopLogger(), tc.input, writeV2RequestFixture.Symbols, nil, nil, nil, "snappy") + payload, _, _, err := buildV2WriteRequest(promslog.NewNopLogger(), tc.input, writeV2RequestFixture.Symbols, nil, nil, nil, "snappy") require.NoError(t, err) req, err := http.NewRequest("", "", bytes.NewReader(payload)) @@ -445,7 +446,7 @@ func TestRemoteWriteHandler_V2Message(t *testing.T) { appendExemplarErr: tc.appendExemplarErr, updateMetadataErr: tc.updateMetadataErr, } - handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, 
[]config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV2}) + handler := NewWriteHandler(promslog.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV2}) recorder := httptest.NewRecorder() handler.ServeHTTP(recorder, req) @@ -544,7 +545,7 @@ func TestOutOfOrderSample_V1Message(t *testing.T) { require.NoError(t, err) appendable := &mockAppendable{latestSample: map[uint64]int64{labels.FromStrings("__name__", "test_metric").Hash(): 100}} - handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1}) + handler := NewWriteHandler(promslog.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1}) recorder := httptest.NewRecorder() handler.ServeHTTP(recorder, req) @@ -586,7 +587,7 @@ func TestOutOfOrderExemplar_V1Message(t *testing.T) { require.NoError(t, err) appendable := &mockAppendable{latestSample: map[uint64]int64{labels.FromStrings("__name__", "test_metric").Hash(): 100}} - handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1}) + handler := NewWriteHandler(promslog.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1}) recorder := httptest.NewRecorder() handler.ServeHTTP(recorder, req) @@ -624,7 +625,7 @@ func TestOutOfOrderHistogram_V1Message(t *testing.T) { require.NoError(t, err) appendable := &mockAppendable{latestSample: map[uint64]int64{labels.FromStrings("__name__", "test_metric").Hash(): 100}} - handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1}) + handler := NewWriteHandler(promslog.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1}) recorder := httptest.NewRecorder() handler.ServeHTTP(recorder, req) @@ -655,7 +656,7 @@ func BenchmarkRemoteWriteHandler(b *testing.B) { appendable := &mockAppendable{} // TODO: 
test with other proto format(s) - handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1}) + handler := NewWriteHandler(promslog.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1}) recorder := httptest.NewRecorder() b.ResetTimer() @@ -672,7 +673,7 @@ func TestCommitErr_V1Message(t *testing.T) { require.NoError(t, err) appendable := &mockAppendable{commitErr: fmt.Errorf("commit error")} - handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1}) + handler := NewWriteHandler(promslog.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1}) recorder := httptest.NewRecorder() handler.ServeHTTP(recorder, req) @@ -685,7 +686,7 @@ func TestCommitErr_V1Message(t *testing.T) { } func TestCommitErr_V2Message(t *testing.T) { - payload, _, _, err := buildV2WriteRequest(log.NewNopLogger(), writeV2RequestFixture.Timeseries, writeV2RequestFixture.Symbols, nil, nil, nil, "snappy") + payload, _, _, err := buildV2WriteRequest(promslog.NewNopLogger(), writeV2RequestFixture.Timeseries, writeV2RequestFixture.Symbols, nil, nil, nil, "snappy") require.NoError(t, err) req, err := http.NewRequest("", "", bytes.NewReader(payload)) @@ -696,7 +697,7 @@ func TestCommitErr_V2Message(t *testing.T) { req.Header.Set(RemoteWriteVersionHeader, RemoteWriteVersion20HeaderValue) appendable := &mockAppendable{commitErr: fmt.Errorf("commit error")} - handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV2}) + handler := NewWriteHandler(promslog.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV2}) recorder := httptest.NewRecorder() handler.ServeHTTP(recorder, req) @@ -723,7 +724,7 @@ func BenchmarkRemoteWriteOOOSamples(b *testing.B) { require.NoError(b, db.Close()) }) // TODO: test with other 
proto format(s) - handler := NewWriteHandler(log.NewNopLogger(), nil, db.Head(), []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1}) + handler := NewWriteHandler(promslog.NewNopLogger(), nil, db.Head(), []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1}) buf, _, _, err := buildWriteRequest(nil, genSeriesWithSample(1000, 200*time.Minute.Milliseconds()), nil, nil, nil, nil, "snappy") require.NoError(b, err) diff --git a/tracing/tracing.go b/tracing/tracing.go index 6b9319ecbd..4fdedf505b 100644 --- a/tracing/tracing.go +++ b/tracing/tracing.go @@ -16,11 +16,10 @@ package tracing import ( "context" "fmt" + "log/slog" "reflect" "time" - "github.com/go-kit/log" - "github.com/go-kit/log/level" config_util "github.com/prometheus/common/config" "github.com/prometheus/common/version" "go.opentelemetry.io/otel" @@ -43,14 +42,14 @@ const serviceName = "prometheus" // Manager is capable of building, (re)installing and shutting down // the tracer provider. type Manager struct { - logger log.Logger + logger *slog.Logger done chan struct{} config config.TracingConfig shutdownFunc func() error } // NewManager creates a new tracing manager. 
-func NewManager(logger log.Logger) *Manager { +func NewManager(logger *slog.Logger) *Manager { return &Manager{ logger: logger, done: make(chan struct{}), @@ -62,7 +61,7 @@ func NewManager(logger log.Logger) *Manager { func (m *Manager) Run() { otel.SetTextMapPropagator(propagation.TraceContext{}) otel.SetErrorHandler(otelErrHandler(func(err error) { - level.Error(m.logger).Log("msg", "OpenTelemetry handler returned an error", "err", err) + m.logger.Error("OpenTelemetry handler returned an error", "err", err.Error()) })) <-m.done } @@ -89,7 +88,7 @@ func (m *Manager) ApplyConfig(cfg *config.Config) error { m.config = cfg.TracingConfig m.shutdownFunc = nil otel.SetTracerProvider(noop.NewTracerProvider()) - level.Info(m.logger).Log("msg", "Tracing provider uninstalled.") + m.logger.Info("Tracing provider uninstalled.") return nil } @@ -102,7 +101,7 @@ func (m *Manager) ApplyConfig(cfg *config.Config) error { m.config = cfg.TracingConfig otel.SetTracerProvider(tp) - level.Info(m.logger).Log("msg", "Successfully installed a new tracer provider.") + m.logger.Info("Successfully installed a new tracer provider.") return nil } @@ -115,10 +114,10 @@ func (m *Manager) Stop() { } if err := m.shutdownFunc(); err != nil { - level.Error(m.logger).Log("msg", "failed to shut down the tracer provider", "err", err) + m.logger.Error("failed to shut down the tracer provider", "err", err) } - level.Info(m.logger).Log("msg", "Tracing manager stopped") + m.logger.Info("Tracing manager stopped") } type otelErrHandler func(err error) diff --git a/tracing/tracing_test.go b/tracing/tracing_test.go index b7996c6104..e735e1a18a 100644 --- a/tracing/tracing_test.go +++ b/tracing/tracing_test.go @@ -16,8 +16,8 @@ package tracing import ( "testing" - "github.com/go-kit/log" config_util "github.com/prometheus/common/config" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/trace/noop" @@ -28,7 +28,7 @@ import ( 
func TestInstallingNewTracerProvider(t *testing.T) { tpBefore := otel.GetTracerProvider() - m := NewManager(log.NewNopLogger()) + m := NewManager(promslog.NewNopLogger()) cfg := config.Config{ TracingConfig: config.TracingConfig{ Endpoint: "localhost:1234", @@ -41,7 +41,7 @@ func TestInstallingNewTracerProvider(t *testing.T) { } func TestReinstallingTracerProvider(t *testing.T) { - m := NewManager(log.NewNopLogger()) + m := NewManager(promslog.NewNopLogger()) cfg := config.Config{ TracingConfig: config.TracingConfig{ Endpoint: "localhost:1234", @@ -76,7 +76,7 @@ func TestReinstallingTracerProvider(t *testing.T) { } func TestReinstallingTracerProviderWithTLS(t *testing.T) { - m := NewManager(log.NewNopLogger()) + m := NewManager(promslog.NewNopLogger()) cfg := config.Config{ TracingConfig: config.TracingConfig{ Endpoint: "localhost:1234", @@ -96,7 +96,7 @@ func TestReinstallingTracerProviderWithTLS(t *testing.T) { } func TestUninstallingTracerProvider(t *testing.T) { - m := NewManager(log.NewNopLogger()) + m := NewManager(promslog.NewNopLogger()) cfg := config.Config{ TracingConfig: config.TracingConfig{ Endpoint: "localhost:1234", @@ -118,7 +118,7 @@ func TestUninstallingTracerProvider(t *testing.T) { } func TestTracerProviderShutdown(t *testing.T) { - m := NewManager(log.NewNopLogger()) + m := NewManager(promslog.NewNopLogger()) cfg := config.Config{ TracingConfig: config.TracingConfig{ Endpoint: "localhost:1234", diff --git a/tsdb/agent/db.go b/tsdb/agent/db.go index 5e33fce808..b2c40b2017 100644 --- a/tsdb/agent/db.go +++ b/tsdb/agent/db.go @@ -17,14 +17,13 @@ import ( "context" "errors" "fmt" + "log/slog" "math" "path/filepath" "sync" "time" "unicode/utf8" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "go.uber.org/atomic" @@ -226,7 +225,7 @@ func (m *dbMetrics) Unregister() { // DB represents a WAL-only storage. It implements storage.DB. 
type DB struct { mtx sync.RWMutex - logger log.Logger + logger *slog.Logger opts *Options rs *remote.Storage @@ -251,7 +250,7 @@ type DB struct { } // Open returns a new agent.DB in the given directory. -func Open(l log.Logger, reg prometheus.Registerer, rs *remote.Storage, dir string, opts *Options) (*DB, error) { +func Open(l *slog.Logger, reg prometheus.Registerer, rs *remote.Storage, dir string, opts *Options) (*DB, error) { opts = validateOptions(opts) locker, err := tsdbutil.NewDirLocker(dir, "agent", l, reg) @@ -306,11 +305,11 @@ func Open(l log.Logger, reg prometheus.Registerer, rs *remote.Storage, dir strin } if err := db.replayWAL(); err != nil { - level.Warn(db.logger).Log("msg", "encountered WAL read error, attempting repair", "err", err) + db.logger.Warn("encountered WAL read error, attempting repair", "err", err) if err := w.Repair(err); err != nil { return nil, fmt.Errorf("repair corrupted WAL: %w", err) } - level.Info(db.logger).Log("msg", "successfully repaired WAL") + db.logger.Info("successfully repaired WAL") } go db.run() @@ -359,7 +358,7 @@ func validateOptions(opts *Options) *Options { } func (db *DB) replayWAL() error { - level.Info(db.logger).Log("msg", "replaying WAL, this may take a while", "dir", db.wal.Dir()) + db.logger.Info("replaying WAL, this may take a while", "dir", db.wal.Dir()) start := time.Now() dir, startFrom, err := wlog.LastCheckpoint(db.wal.Dir()) @@ -376,7 +375,7 @@ func (db *DB) replayWAL() error { } defer func() { if err := sr.Close(); err != nil { - level.Warn(db.logger).Log("msg", "error while closing the wal segments reader", "err", err) + db.logger.Warn("error while closing the wal segments reader", "err", err) } }() @@ -386,7 +385,7 @@ func (db *DB) replayWAL() error { return fmt.Errorf("backfill checkpoint: %w", err) } startFrom++ - level.Info(db.logger).Log("msg", "WAL checkpoint loaded") + db.logger.Info("WAL checkpoint loaded") } // Find the last segment. 
@@ -405,12 +404,12 @@ func (db *DB) replayWAL() error { sr := wlog.NewSegmentBufReader(seg) err = db.loadWAL(wlog.NewReader(sr), multiRef) if err := sr.Close(); err != nil { - level.Warn(db.logger).Log("msg", "error while closing the wal segments reader", "err", err) + db.logger.Warn("error while closing the wal segments reader", "err", err) } if err != nil { return err } - level.Info(db.logger).Log("msg", "WAL segment loaded", "segment", i, "maxSegment", last) + db.logger.Info("WAL segment loaded", "segment", i, "maxSegment", last) } walReplayDuration := time.Since(start) @@ -571,7 +570,7 @@ func (db *DB) loadWAL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks.H } if v := nonExistentSeriesRefs.Load(); v > 0 { - level.Warn(db.logger).Log("msg", "found sample referencing non-existing series", "skipped_series", v) + db.logger.Warn("found sample referencing non-existing series", "skipped_series", v) } db.nextRef.Store(uint64(lastRef)) @@ -616,9 +615,9 @@ Loop: ts = maxTS } - level.Debug(db.logger).Log("msg", "truncating the WAL", "ts", ts) + db.logger.Debug("truncating the WAL", "ts", ts) if err := db.truncate(ts); err != nil { - level.Warn(db.logger).Log("msg", "failed to truncate WAL", "err", err) + db.logger.Warn("failed to truncate WAL", "err", err) } } } @@ -631,7 +630,7 @@ func (db *DB) truncate(mint int64) error { start := time.Now() db.gc(mint) - level.Info(db.logger).Log("msg", "series GC completed", "duration", time.Since(start)) + db.logger.Info("series GC completed", "duration", time.Since(start)) first, last, err := wlog.Segments(db.wal.Dir()) if err != nil { @@ -679,7 +678,7 @@ func (db *DB) truncate(mint int64) error { // If truncating fails, we'll just try it again at the next checkpoint. // Leftover segments will still just be ignored in the future if there's a // checkpoint that supersedes them. 
- level.Error(db.logger).Log("msg", "truncating segments failed", "err", err) + db.logger.Error("truncating segments failed", "err", err) } // The checkpoint is written and segments before it are truncated, so we @@ -696,13 +695,13 @@ func (db *DB) truncate(mint int64) error { // Leftover old checkpoints do not cause problems down the line beyond // occupying disk space. They will just be ignored since a newer checkpoint // exists. - level.Error(db.logger).Log("msg", "delete old checkpoints", "err", err) + db.logger.Error("delete old checkpoints", "err", err) db.metrics.checkpointDeleteFail.Inc() } db.metrics.walTruncateDuration.Observe(time.Since(start).Seconds()) - level.Info(db.logger).Log("msg", "WAL checkpoint complete", "first", first, "last", last, "duration", time.Since(start)) + db.logger.Info("WAL checkpoint complete", "first", first, "last", last, "duration", time.Since(start)) return nil } diff --git a/tsdb/agent/db_test.go b/tsdb/agent/db_test.go index f940e19158..4d5fda25db 100644 --- a/tsdb/agent/db_test.go +++ b/tsdb/agent/db_test.go @@ -22,11 +22,10 @@ import ( "testing" "time" - "github.com/go-kit/log" - "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "github.com/prometheus/prometheus/model/exemplar" @@ -89,12 +88,12 @@ func createTestAgentDB(t testing.TB, reg prometheus.Registerer, opts *Options) * t.Helper() dbDir := t.TempDir() - rs := remote.NewStorage(log.NewNopLogger(), reg, startTime, dbDir, time.Second*30, nil, false) + rs := remote.NewStorage(promslog.NewNopLogger(), reg, startTime, dbDir, time.Second*30, nil, false) t.Cleanup(func() { require.NoError(t, rs.Close()) }) - db, err := Open(log.NewNopLogger(), reg, rs, dbDir, opts) + db, err := Open(promslog.NewNopLogger(), reg, rs, dbDir, opts) require.NoError(t, err) return db } @@ -583,7 +582,7 @@ func TestWALReplay(t *testing.T) 
{ func TestLockfile(t *testing.T) { tsdbutil.TestDirLockerUsage(t, func(t *testing.T, data string, createLock bool) (*tsdbutil.DirLocker, testutil.Closer) { - logger := log.NewNopLogger() + logger := promslog.NewNopLogger() reg := prometheus.NewRegistry() rs := remote.NewStorage(logger, reg, startTime, data, time.Second*30, nil, false) t.Cleanup(func() { @@ -605,12 +604,12 @@ func TestLockfile(t *testing.T) { func Test_ExistingWAL_NextRef(t *testing.T) { dbDir := t.TempDir() - rs := remote.NewStorage(log.NewNopLogger(), nil, startTime, dbDir, time.Second*30, nil, false) + rs := remote.NewStorage(promslog.NewNopLogger(), nil, startTime, dbDir, time.Second*30, nil, false) defer func() { require.NoError(t, rs.Close()) }() - db, err := Open(log.NewNopLogger(), nil, rs, dbDir, DefaultOptions()) + db, err := Open(promslog.NewNopLogger(), nil, rs, dbDir, DefaultOptions()) require.NoError(t, err) seriesCount := 10 @@ -638,7 +637,7 @@ func Test_ExistingWAL_NextRef(t *testing.T) { require.NoError(t, db.Close()) // Create a new storage and see what nextRef is initialized to. 
- db, err = Open(log.NewNopLogger(), nil, rs, dbDir, DefaultOptions()) + db, err = Open(promslog.NewNopLogger(), nil, rs, dbDir, DefaultOptions()) require.NoError(t, err) defer func() { require.NoError(t, db.Close()) diff --git a/tsdb/block.go b/tsdb/block.go index 2f32733f8c..48ba4588aa 100644 --- a/tsdb/block.go +++ b/tsdb/block.go @@ -20,15 +20,16 @@ import ( "errors" "fmt" "io" + "log/slog" "os" "path/filepath" "slices" "sync" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/oklog/ulid" + "github.com/prometheus/common/promslog" + "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/chunkenc" @@ -265,7 +266,7 @@ func readMetaFile(dir string) (*BlockMeta, int64, error) { return &m, int64(len(b)), nil } -func writeMetaFile(logger log.Logger, dir string, meta *BlockMeta) (int64, error) { +func writeMetaFile(logger *slog.Logger, dir string, meta *BlockMeta) (int64, error) { meta.Version = metaVersion1 // Make any changes to the file appear atomic. @@ -273,7 +274,7 @@ func writeMetaFile(logger log.Logger, dir string, meta *BlockMeta) (int64, error tmp := path + ".tmp" defer func() { if err := os.RemoveAll(tmp); err != nil { - level.Error(logger).Log("msg", "remove tmp file", "err", err.Error()) + logger.Error("remove tmp file", "err", err.Error()) } }() @@ -319,7 +320,7 @@ type Block struct { indexr IndexReader tombstones tombstones.Reader - logger log.Logger + logger *slog.Logger numBytesChunks int64 numBytesIndex int64 @@ -329,9 +330,9 @@ type Block struct { // OpenBlock opens the block in the directory. It can be passed a chunk pool, which is used // to instantiate chunk structs. 
-func OpenBlock(logger log.Logger, dir string, pool chunkenc.Pool) (pb *Block, err error) { +func OpenBlock(logger *slog.Logger, dir string, pool chunkenc.Pool) (pb *Block, err error) { if logger == nil { - logger = log.NewNopLogger() + logger = promslog.NewNopLogger() } var closers []io.Closer defer func() { diff --git a/tsdb/block_test.go b/tsdb/block_test.go index bd86b27814..3589b42c17 100644 --- a/tsdb/block_test.go +++ b/tsdb/block_test.go @@ -27,8 +27,8 @@ import ( "strconv" "testing" - "github.com/go-kit/log" prom_testutil "github.com/prometheus/client_golang/prometheus/testutil" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "github.com/prometheus/prometheus/model/histogram" @@ -47,7 +47,7 @@ import ( func TestBlockMetaMustNeverBeVersion2(t *testing.T) { dir := t.TempDir() - _, err := writeMetaFile(log.NewNopLogger(), dir, &BlockMeta{}) + _, err := writeMetaFile(promslog.NewNopLogger(), dir, &BlockMeta{}) require.NoError(t, err) meta, _, err := readMetaFile(dir) @@ -372,7 +372,7 @@ func TestBlockSize(t *testing.T) { require.NoError(t, err) require.Equal(t, expAfterDelete, actAfterDelete, "after a delete reported block size doesn't match actual disk size") - c, err := NewLeveledCompactor(context.Background(), nil, log.NewNopLogger(), []int64{0}, nil, nil) + c, err := NewLeveledCompactor(context.Background(), nil, promslog.NewNopLogger(), []int64{0}, nil, nil) require.NoError(t, err) blockDirsAfterCompact, err := c.Compact(tmpdir, []string{blockInit.Dir()}, nil) require.NoError(t, err) @@ -621,13 +621,13 @@ func testPostingsForLabelMatching(t *testing.T, offset storage.SeriesRef, setUp // createBlock creates a block with given set of series and returns its dir. 
func createBlock(tb testing.TB, dir string, series []storage.Series) string { - blockDir, err := CreateBlock(series, dir, 0, log.NewNopLogger()) + blockDir, err := CreateBlock(series, dir, 0, promslog.NewNopLogger()) require.NoError(tb, err) return blockDir } func createBlockFromHead(tb testing.TB, dir string, head *Head) string { - compactor, err := NewLeveledCompactor(context.Background(), nil, log.NewNopLogger(), []int64{1000000}, nil, nil) + compactor, err := NewLeveledCompactor(context.Background(), nil, promslog.NewNopLogger(), []int64{1000000}, nil, nil) require.NoError(tb, err) require.NoError(tb, os.MkdirAll(dir, 0o777)) @@ -641,7 +641,7 @@ func createBlockFromHead(tb testing.TB, dir string, head *Head) string { } func createBlockFromOOOHead(tb testing.TB, dir string, head *OOOCompactionHead) string { - compactor, err := NewLeveledCompactor(context.Background(), nil, log.NewNopLogger(), []int64{1000000}, nil, nil) + compactor, err := NewLeveledCompactor(context.Background(), nil, promslog.NewNopLogger(), []int64{1000000}, nil, nil) require.NoError(tb, err) require.NoError(tb, os.MkdirAll(dir, 0o777)) diff --git a/tsdb/blockwriter.go b/tsdb/blockwriter.go index 232ec2b914..63f82e28df 100644 --- a/tsdb/blockwriter.go +++ b/tsdb/blockwriter.go @@ -17,11 +17,10 @@ import ( "context" "errors" "fmt" + "log/slog" "math" "os" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/oklog/ulid" "github.com/prometheus/prometheus/model/timestamp" @@ -31,7 +30,7 @@ import ( // BlockWriter is a block writer that allows appending and flushing series to disk. type BlockWriter struct { - logger log.Logger + logger *slog.Logger destinationDir string head *Head @@ -50,7 +49,7 @@ var ErrNoSeriesAppended = errors.New("no series appended, aborting") // contains anything at all. It is the caller's responsibility to // ensure that the resulting blocks do not overlap etc. // Writer ensures the block flush is atomic (via rename). 
-func NewBlockWriter(logger log.Logger, dir string, blockSize int64) (*BlockWriter, error) { +func NewBlockWriter(logger *slog.Logger, dir string, blockSize int64) (*BlockWriter, error) { w := &BlockWriter{ logger: logger, destinationDir: dir, @@ -95,7 +94,7 @@ func (w *BlockWriter) Flush(ctx context.Context) (ulid.ULID, error) { // Add +1 millisecond to block maxt because block intervals are half-open: [b.MinTime, b.MaxTime). // Because of this block intervals are always +1 than the total samples it includes. maxt := w.head.MaxTime() + 1 - level.Info(w.logger).Log("msg", "flushing", "series_count", w.head.NumSeries(), "mint", timestamp.Time(mint), "maxt", timestamp.Time(maxt)) + w.logger.Info("flushing", "series_count", w.head.NumSeries(), "mint", timestamp.Time(mint), "maxt", timestamp.Time(maxt)) compactor, err := NewLeveledCompactor(ctx, nil, @@ -121,7 +120,7 @@ func (w *BlockWriter) Flush(ctx context.Context) (ulid.ULID, error) { func (w *BlockWriter) Close() error { defer func() { if err := os.RemoveAll(w.chunkDir); err != nil { - level.Error(w.logger).Log("msg", "error in deleting BlockWriter files", "err", err) + w.logger.Error("error in deleting BlockWriter files", "err", err) } }() return w.head.Close() diff --git a/tsdb/blockwriter_test.go b/tsdb/blockwriter_test.go index d8240b53c6..4ec25df70a 100644 --- a/tsdb/blockwriter_test.go +++ b/tsdb/blockwriter_test.go @@ -19,9 +19,10 @@ import ( "path/filepath" "testing" - "github.com/go-kit/log" "github.com/stretchr/testify/require" + "github.com/prometheus/common/promslog" + "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/tsdb/chunks" ) @@ -29,7 +30,7 @@ import ( func TestBlockWriter(t *testing.T) { ctx := context.Background() outputDir := t.TempDir() - w, err := NewBlockWriter(log.NewNopLogger(), outputDir, DefaultBlockDuration) + w, err := NewBlockWriter(promslog.NewNopLogger(), outputDir, DefaultBlockDuration) require.NoError(t, err) // Add some series. 
diff --git a/tsdb/compact.go b/tsdb/compact.go index 9ef42b339b..ff35679e3f 100644 --- a/tsdb/compact.go +++ b/tsdb/compact.go @@ -19,15 +19,15 @@ import ( "errors" "fmt" "io" + "log/slog" "os" "path/filepath" "slices" "time" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/oklog/ulid" "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/common/promslog" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/chunkenc" @@ -80,7 +80,7 @@ type Compactor interface { // LeveledCompactor implements the Compactor interface. type LeveledCompactor struct { metrics *CompactorMetrics - logger log.Logger + logger *slog.Logger ranges []int64 chunkPool chunkenc.Pool ctx context.Context @@ -167,7 +167,7 @@ type LeveledCompactorOptions struct { EnableOverlappingCompaction bool } -func NewLeveledCompactorWithChunkSize(ctx context.Context, r prometheus.Registerer, l log.Logger, ranges []int64, pool chunkenc.Pool, maxBlockChunkSegmentSize int64, mergeFunc storage.VerticalChunkSeriesMergeFunc) (*LeveledCompactor, error) { +func NewLeveledCompactorWithChunkSize(ctx context.Context, r prometheus.Registerer, l *slog.Logger, ranges []int64, pool chunkenc.Pool, maxBlockChunkSegmentSize int64, mergeFunc storage.VerticalChunkSeriesMergeFunc) (*LeveledCompactor, error) { return NewLeveledCompactorWithOptions(ctx, r, l, ranges, pool, LeveledCompactorOptions{ MaxBlockChunkSegmentSize: maxBlockChunkSegmentSize, MergeFunc: mergeFunc, @@ -175,14 +175,14 @@ func NewLeveledCompactorWithChunkSize(ctx context.Context, r prometheus.Register }) } -func NewLeveledCompactor(ctx context.Context, r prometheus.Registerer, l log.Logger, ranges []int64, pool chunkenc.Pool, mergeFunc storage.VerticalChunkSeriesMergeFunc) (*LeveledCompactor, error) { +func NewLeveledCompactor(ctx context.Context, r prometheus.Registerer, l *slog.Logger, ranges []int64, pool chunkenc.Pool, mergeFunc storage.VerticalChunkSeriesMergeFunc) (*LeveledCompactor, 
error) { return NewLeveledCompactorWithOptions(ctx, r, l, ranges, pool, LeveledCompactorOptions{ MergeFunc: mergeFunc, EnableOverlappingCompaction: true, }) } -func NewLeveledCompactorWithOptions(ctx context.Context, r prometheus.Registerer, l log.Logger, ranges []int64, pool chunkenc.Pool, opts LeveledCompactorOptions) (*LeveledCompactor, error) { +func NewLeveledCompactorWithOptions(ctx context.Context, r prometheus.Registerer, l *slog.Logger, ranges []int64, pool chunkenc.Pool, opts LeveledCompactorOptions) (*LeveledCompactor, error) { if len(ranges) == 0 { return nil, fmt.Errorf("at least one range must be provided") } @@ -190,7 +190,7 @@ func NewLeveledCompactorWithOptions(ctx context.Context, r prometheus.Registerer pool = chunkenc.NewPool() } if l == nil { - l = log.NewNopLogger() + l = promslog.NewNopLogger() } mergeFunc := opts.MergeFunc if mergeFunc == nil { @@ -500,15 +500,15 @@ func (c *LeveledCompactor) CompactWithBlockPopulator(dest string, dirs []string, b.meta.Compaction.Deletable = true n, err := writeMetaFile(c.logger, b.dir, &b.meta) if err != nil { - level.Error(c.logger).Log( - "msg", "Failed to write 'Deletable' to meta file after compaction", + c.logger.Error( + "Failed to write 'Deletable' to meta file after compaction", "ulid", b.meta.ULID, ) } b.numBytesMeta = n } - level.Info(c.logger).Log( - "msg", "compact blocks resulted in empty block", + c.logger.Info( + "compact blocks resulted in empty block", "count", len(blocks), "sources", fmt.Sprintf("%v", uids), "duration", time.Since(start), @@ -516,8 +516,8 @@ func (c *LeveledCompactor) CompactWithBlockPopulator(dest string, dirs []string, return nil, nil } - level.Info(c.logger).Log( - "msg", "compact blocks", + c.logger.Info( + "compact blocks", "count", len(blocks), "mint", meta.MinTime, "maxt", meta.MaxTime, @@ -568,8 +568,8 @@ func (c *LeveledCompactor) Write(dest string, b BlockReader, mint, maxt int64, b } if meta.Stats.NumSamples == 0 { - level.Info(c.logger).Log( - "msg", "write 
block resulted in empty block", + c.logger.Info( + "write block resulted in empty block", "mint", meta.MinTime, "maxt", meta.MaxTime, "duration", time.Since(start), @@ -577,8 +577,8 @@ func (c *LeveledCompactor) Write(dest string, b BlockReader, mint, maxt int64, b return nil, nil } - level.Info(c.logger).Log( - "msg", "write block", + c.logger.Info( + "write block", "mint", meta.MinTime, "maxt", meta.MaxTime, "ulid", meta.ULID, @@ -617,7 +617,7 @@ func (c *LeveledCompactor) write(dest string, meta *BlockMeta, blockPopulator Bl // RemoveAll returns no error when tmp doesn't exist so it is safe to always run it. if err := os.RemoveAll(tmp); err != nil { - level.Error(c.logger).Log("msg", "removed tmp folder after failed compaction", "err", err.Error()) + c.logger.Error("removed tmp folder after failed compaction", "err", err.Error()) } c.metrics.Ran.Inc() c.metrics.Duration.Observe(time.Since(t).Seconds()) @@ -722,7 +722,7 @@ func (c *LeveledCompactor) write(dest string, meta *BlockMeta, blockPopulator Bl } type BlockPopulator interface { - PopulateBlock(ctx context.Context, metrics *CompactorMetrics, logger log.Logger, chunkPool chunkenc.Pool, mergeFunc storage.VerticalChunkSeriesMergeFunc, blocks []BlockReader, meta *BlockMeta, indexw IndexWriter, chunkw ChunkWriter, postingsFunc IndexReaderPostingsFunc) error + PopulateBlock(ctx context.Context, metrics *CompactorMetrics, logger *slog.Logger, chunkPool chunkenc.Pool, mergeFunc storage.VerticalChunkSeriesMergeFunc, blocks []BlockReader, meta *BlockMeta, indexw IndexWriter, chunkw ChunkWriter, postingsFunc IndexReaderPostingsFunc) error } // IndexReaderPostingsFunc is a function to get a sorted posting iterator from a given index reader. @@ -743,7 +743,7 @@ type DefaultBlockPopulator struct{} // PopulateBlock fills the index and chunk writers with new data gathered as the union // of the provided blocks. It returns meta information for the new block. // It expects sorted blocks input by mint. 
-func (c DefaultBlockPopulator) PopulateBlock(ctx context.Context, metrics *CompactorMetrics, logger log.Logger, chunkPool chunkenc.Pool, mergeFunc storage.VerticalChunkSeriesMergeFunc, blocks []BlockReader, meta *BlockMeta, indexw IndexWriter, chunkw ChunkWriter, postingsFunc IndexReaderPostingsFunc) (err error) { +func (c DefaultBlockPopulator) PopulateBlock(ctx context.Context, metrics *CompactorMetrics, logger *slog.Logger, chunkPool chunkenc.Pool, mergeFunc storage.VerticalChunkSeriesMergeFunc, blocks []BlockReader, meta *BlockMeta, indexw IndexWriter, chunkw ChunkWriter, postingsFunc IndexReaderPostingsFunc) (err error) { if len(blocks) == 0 { return errors.New("cannot populate block from no readers") } @@ -776,7 +776,7 @@ func (c DefaultBlockPopulator) PopulateBlock(ctx context.Context, metrics *Compa if i > 0 && b.Meta().MinTime < globalMaxt { metrics.OverlappingBlocks.Inc() overlapping = true - level.Info(logger).Log("msg", "Found overlapping blocks during compaction", "ulid", meta.ULID) + logger.Info("Found overlapping blocks during compaction", "ulid", meta.ULID) } if b.Meta().MaxTime > globalMaxt { globalMaxt = b.Meta().MaxTime diff --git a/tsdb/compact_test.go b/tsdb/compact_test.go index d69b70d204..5123d6e624 100644 --- a/tsdb/compact_test.go +++ b/tsdb/compact_test.go @@ -28,9 +28,9 @@ import ( "testing" "time" - "github.com/go-kit/log" "github.com/oklog/ulid" prom_testutil "github.com/prometheus/client_golang/prometheus/testutil" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "github.com/prometheus/prometheus/model/histogram" @@ -434,7 +434,7 @@ func TestRangeWithFailedCompactionWontGetSelected(t *testing.T) { } func TestCompactionFailWillCleanUpTempDir(t *testing.T) { - compactor, err := NewLeveledCompactor(context.Background(), nil, log.NewNopLogger(), []int64{ + compactor, err := NewLeveledCompactor(context.Background(), nil, promslog.NewNopLogger(), []int64{ 20, 60, 240, @@ -1162,7 +1162,7 @@ func 
BenchmarkCompaction(b *testing.B) { blockDirs = append(blockDirs, block.Dir()) } - c, err := NewLeveledCompactor(context.Background(), nil, log.NewNopLogger(), []int64{0}, nil, nil) + c, err := NewLeveledCompactor(context.Background(), nil, promslog.NewNopLogger(), []int64{0}, nil, nil) require.NoError(b, err) b.ResetTimer() @@ -1318,7 +1318,7 @@ func TestCancelCompactions(t *testing.T) { // Measure the compaction time without interrupting it. var timeCompactionUninterrupted time.Duration { - db, err := open(tmpdir, log.NewNopLogger(), nil, DefaultOptions(), []int64{1, 2000}, nil) + db, err := open(tmpdir, promslog.NewNopLogger(), nil, DefaultOptions(), []int64{1, 2000}, nil) require.NoError(t, err) require.Len(t, db.Blocks(), 3, "initial block count mismatch") require.Equal(t, 0.0, prom_testutil.ToFloat64(db.compactor.(*LeveledCompactor).metrics.Ran), "initial compaction counter mismatch") @@ -1337,7 +1337,7 @@ func TestCancelCompactions(t *testing.T) { } // Measure the compaction time when closing the db in the middle of compaction. { - db, err := open(tmpdirCopy, log.NewNopLogger(), nil, DefaultOptions(), []int64{1, 2000}, nil) + db, err := open(tmpdirCopy, promslog.NewNopLogger(), nil, DefaultOptions(), []int64{1, 2000}, nil) require.NoError(t, err) require.Len(t, db.Blocks(), 3, "initial block count mismatch") require.Equal(t, 0.0, prom_testutil.ToFloat64(db.compactor.(*LeveledCompactor).metrics.Ran), "initial compaction counter mismatch") @@ -1358,7 +1358,7 @@ func TestCancelCompactions(t *testing.T) { // This checks that the `context.Canceled` error is properly checked at all levels: // - tsdb_errors.NewMulti() should have the Is() method implemented for correct checks. // - callers should check with errors.Is() instead of ==. 
- readOnlyDB, err := OpenDBReadOnly(tmpdirCopy, "", log.NewNopLogger()) + readOnlyDB, err := OpenDBReadOnly(tmpdirCopy, "", promslog.NewNopLogger()) require.NoError(t, err) blocks, err := readOnlyDB.Blocks() require.NoError(t, err) @@ -1917,7 +1917,7 @@ func TestCompactEmptyResultBlockWithTombstone(t *testing.T) { err = block.Delete(ctx, 0, 10, labels.MustNewMatcher(labels.MatchEqual, defaultLabelName, "0")) require.NoError(t, err) - c, err := NewLeveledCompactor(ctx, nil, log.NewNopLogger(), []int64{0}, nil, nil) + c, err := NewLeveledCompactor(ctx, nil, promslog.NewNopLogger(), []int64{0}, nil, nil) require.NoError(t, err) ulids, err := c.Compact(tmpdir, []string{blockDir}, []*Block{block}) @@ -2121,7 +2121,7 @@ func TestDelayedCompactionDoesNotBlockUnrelatedOps(t *testing.T) { options := DefaultOptions() // This will make the test timeout if compaction really waits for it. options.CompactionDelay = time.Hour - db, err := open(tmpdir, log.NewNopLogger(), nil, options, []int64{10, 200}, nil) + db, err := open(tmpdir, promslog.NewNopLogger(), nil, options, []int64{10, 200}, nil) require.NoError(t, err) defer func() { require.NoError(t, db.Close()) diff --git a/tsdb/db.go b/tsdb/db.go index 3b1dee27d4..6ebef3f52f 100644 --- a/tsdb/db.go +++ b/tsdb/db.go @@ -20,6 +20,7 @@ import ( "fmt" "io" "io/fs" + "log/slog" "math" "math/rand" "os" @@ -29,10 +30,9 @@ import ( "sync" "time" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/oklog/ulid" "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/common/promslog" "go.uber.org/atomic" "golang.org/x/sync/errgroup" @@ -215,7 +215,7 @@ type Options struct { BlockChunkQuerierFunc BlockChunkQuerierFunc } -type NewCompactorFunc func(ctx context.Context, r prometheus.Registerer, l log.Logger, ranges []int64, pool chunkenc.Pool, opts *Options) (Compactor, error) +type NewCompactorFunc func(ctx context.Context, r prometheus.Registerer, l *slog.Logger, ranges []int64, pool chunkenc.Pool, opts 
*Options) (Compactor, error) type BlocksToDeleteFunc func(blocks []*Block) map[ulid.ULID]struct{} @@ -229,7 +229,7 @@ type DB struct { dir string locker *tsdbutil.DirLocker - logger log.Logger + logger *slog.Logger metrics *dbMetrics opts *Options chunkPool chunkenc.Pool @@ -420,7 +420,7 @@ var ErrClosed = errors.New("db already closed") // Current implementation doesn't support concurrency so // all API calls should happen in the same go routine. type DBReadOnly struct { - logger log.Logger + logger *slog.Logger dir string sandboxDir string closers []io.Closer @@ -428,7 +428,7 @@ type DBReadOnly struct { } // OpenDBReadOnly opens DB in the given directory for read only operations. -func OpenDBReadOnly(dir, sandboxDirRoot string, l log.Logger) (*DBReadOnly, error) { +func OpenDBReadOnly(dir, sandboxDirRoot string, l *slog.Logger) (*DBReadOnly, error) { if _, err := os.Stat(dir); err != nil { return nil, fmt.Errorf("opening the db dir: %w", err) } @@ -442,7 +442,7 @@ func OpenDBReadOnly(dir, sandboxDirRoot string, l log.Logger) (*DBReadOnly, erro } if l == nil { - l = log.NewNopLogger() + l = promslog.NewNopLogger() } return &DBReadOnly{ @@ -641,7 +641,7 @@ func (db *DBReadOnly) Blocks() ([]BlockReader, error) { if len(corrupted) > 0 { for _, b := range loadable { if err := b.Close(); err != nil { - level.Warn(db.logger).Log("msg", "Closing block failed", "err", err, "block", b) + db.logger.Warn("Closing block failed", "err", err, "block", b) } } errs := tsdb_errors.NewMulti() @@ -673,7 +673,7 @@ func (db *DBReadOnly) Blocks() ([]BlockReader, error) { blockMetas = append(blockMetas, b.Meta()) } if overlaps := OverlappingBlocks(blockMetas); len(overlaps) > 0 { - level.Warn(db.logger).Log("msg", "Overlapping blocks found during opening", "detail", overlaps.String()) + db.logger.Warn("Overlapping blocks found during opening", "detail", overlaps.String()) } // Close all previously open readers and add the new ones to the cache. 
@@ -751,7 +751,7 @@ func (db *DBReadOnly) Close() error { defer func() { // Delete the temporary sandbox directory that was created when opening the DB. if err := os.RemoveAll(db.sandboxDir); err != nil { - level.Error(db.logger).Log("msg", "delete sandbox dir", "err", err) + db.logger.Error("delete sandbox dir", "err", err) } }() select { @@ -765,7 +765,7 @@ func (db *DBReadOnly) Close() error { } // Open returns a new DB in the given directory. If options are empty, DefaultOptions will be used. -func Open(dir string, l log.Logger, r prometheus.Registerer, opts *Options, stats *DBStats) (db *DB, err error) { +func Open(dir string, l *slog.Logger, r prometheus.Registerer, opts *Options, stats *DBStats) (db *DB, err error) { var rngs []int64 opts, rngs = validateOpts(opts, nil) @@ -815,12 +815,12 @@ func validateOpts(opts *Options, rngs []int64) (*Options, []int64) { // open returns a new DB in the given directory. // It initializes the lockfile, WAL, compactor, and Head (by replaying the WAL), and runs the database. // It is not safe to open more than one DB in the same directory. 
-func open(dir string, l log.Logger, r prometheus.Registerer, opts *Options, rngs []int64, stats *DBStats) (_ *DB, returnedErr error) { +func open(dir string, l *slog.Logger, r prometheus.Registerer, opts *Options, rngs []int64, stats *DBStats) (_ *DB, returnedErr error) { if err := os.MkdirAll(dir, 0o777); err != nil { return nil, err } if l == nil { - l = log.NewNopLogger() + l = promslog.NewNopLogger() } if stats == nil { stats = NewDBStats() @@ -998,17 +998,17 @@ func open(dir string, l log.Logger, r prometheus.Registerer, opts *Options, rngs db.head.metrics.walCorruptionsTotal.Inc() var e *errLoadWbl if errors.As(initErr, &e) { - level.Warn(db.logger).Log("msg", "Encountered WBL read error, attempting repair", "err", initErr) + db.logger.Warn("Encountered WBL read error, attempting repair", "err", initErr) if err := wbl.Repair(e.err); err != nil { return nil, fmt.Errorf("repair corrupted WBL: %w", err) } - level.Info(db.logger).Log("msg", "Successfully repaired WBL") + db.logger.Info("Successfully repaired WBL") } else { - level.Warn(db.logger).Log("msg", "Encountered WAL read error, attempting repair", "err", initErr) + db.logger.Warn("Encountered WAL read error, attempting repair", "err", initErr) if err := wal.Repair(initErr); err != nil { return nil, fmt.Errorf("repair corrupted WAL: %w", err) } - level.Info(db.logger).Log("msg", "Successfully repaired WAL") + db.logger.Info("Successfully repaired WAL") } } @@ -1026,7 +1026,7 @@ func open(dir string, l log.Logger, r prometheus.Registerer, opts *Options, rngs return db, nil } -func removeBestEffortTmpDirs(l log.Logger, dir string) error { +func removeBestEffortTmpDirs(l *slog.Logger, dir string) error { files, err := os.ReadDir(dir) if os.IsNotExist(err) { return nil @@ -1037,10 +1037,10 @@ func removeBestEffortTmpDirs(l log.Logger, dir string) error { for _, f := range files { if isTmpDir(f) { if err := os.RemoveAll(filepath.Join(dir, f.Name())); err != nil { - level.Error(l).Log("msg", "failed to delete 
tmp block dir", "dir", filepath.Join(dir, f.Name()), "err", err) + l.Error("failed to delete tmp block dir", "dir", filepath.Join(dir, f.Name()), "err", err) continue } - level.Info(l).Log("msg", "Found and deleted tmp block dir", "dir", filepath.Join(dir, f.Name())) + l.Info("Found and deleted tmp block dir", "dir", filepath.Join(dir, f.Name())) } } return nil @@ -1078,7 +1078,7 @@ func (db *DB) run(ctx context.Context) { case <-time.After(1 * time.Minute): db.cmtx.Lock() if err := db.reloadBlocks(); err != nil { - level.Error(db.logger).Log("msg", "reloadBlocks", "err", err) + db.logger.Error("reloadBlocks", "err", err) } db.cmtx.Unlock() @@ -1094,7 +1094,7 @@ func (db *DB) run(ctx context.Context) { db.autoCompactMtx.Lock() if db.autoCompact { if err := db.Compact(ctx); err != nil { - level.Error(db.logger).Log("msg", "compaction failed", "err", err) + db.logger.Error("compaction failed", "err", err) backoff = exponential(backoff, 1*time.Second, 1*time.Minute) } else { backoff = 0 @@ -1308,8 +1308,8 @@ func (db *DB) Compact(ctx context.Context) (returnErr error) { compactionDuration := time.Since(start) if compactionDuration.Milliseconds() > db.head.chunkRange.Load() { - level.Warn(db.logger).Log( - "msg", "Head compaction took longer than the block time range, compactions are falling behind and won't be able to catch up", + db.logger.Warn( + "Head compaction took longer than the block time range, compactions are falling behind and won't be able to catch up", "duration", compactionDuration.String(), "block_range", db.head.chunkRange.Load(), ) @@ -1433,15 +1433,15 @@ func (db *DB) compactOOO(dest string, oooHead *OOOCompactionHead) (_ []ulid.ULID } if len(ulids) == 0 { - level.Info(db.logger).Log( - "msg", "compact ooo head resulted in no blocks", + db.logger.Info( + "compact ooo head resulted in no blocks", "duration", time.Since(start), ) return nil, nil } - level.Info(db.logger).Log( - "msg", "out-of-order compaction completed", + db.logger.Info( + 
"out-of-order compaction completed", "duration", time.Since(start), "ulids", fmt.Sprintf("%v", ulids), ) @@ -1483,7 +1483,7 @@ func (db *DB) compactBlocks() (err error) { // long enough that we end up with a HEAD block that needs to be written. // Check if that's the case and stop compactions early. if db.head.compactable() && !db.waitingForCompactionDelay() { - level.Warn(db.logger).Log("msg", "aborting block compactions to persit the head block") + db.logger.Warn("aborting block compactions to persit the head block") return nil } @@ -1579,7 +1579,7 @@ func (db *DB) reloadBlocks() (err error) { for _, b := range block.Meta().Compaction.Parents { if _, ok := corrupted[b.ULID]; ok { delete(corrupted, b.ULID) - level.Warn(db.logger).Log("msg", "Found corrupted block, but replaced by compacted one so it's safe to delete. This should not happen with atomic deletes.", "block", b.ULID) + db.logger.Warn("Found corrupted block, but replaced by compacted one so it's safe to delete. This should not happen with atomic deletes.", "block", b.ULID) } deletable[b.ULID] = nil } @@ -1641,7 +1641,7 @@ func (db *DB) reloadBlocks() (err error) { blockMetas = append(blockMetas, b.Meta()) } if overlaps := OverlappingBlocks(blockMetas); len(overlaps) > 0 { - level.Warn(db.logger).Log("msg", "Overlapping blocks found during reloadBlocks", "detail", overlaps.String()) + db.logger.Warn("Overlapping blocks found during reloadBlocks", "detail", overlaps.String()) } } @@ -1657,7 +1657,7 @@ func (db *DB) reloadBlocks() (err error) { return nil } -func openBlocks(l log.Logger, dir string, loaded []*Block, chunkPool chunkenc.Pool) (blocks []*Block, corrupted map[ulid.ULID]error, err error) { +func openBlocks(l *slog.Logger, dir string, loaded []*Block, chunkPool chunkenc.Pool) (blocks []*Block, corrupted map[ulid.ULID]error, err error) { bDirs, err := blockDirs(dir) if err != nil { return nil, nil, fmt.Errorf("find blocks: %w", err) @@ -1667,7 +1667,7 @@ func openBlocks(l log.Logger, dir string, 
loaded []*Block, chunkPool chunkenc.Po for _, bDir := range bDirs { meta, _, err := readMetaFile(bDir) if err != nil { - level.Error(l).Log("msg", "Failed to read meta.json for a block during reloadBlocks. Skipping", "dir", bDir, "err", err) + l.Error("Failed to read meta.json for a block during reloadBlocks. Skipping", "dir", bDir, "err", err) continue } @@ -1784,7 +1784,7 @@ func (db *DB) deleteBlocks(blocks map[ulid.ULID]*Block) error { for ulid, block := range blocks { if block != nil { if err := block.Close(); err != nil { - level.Warn(db.logger).Log("msg", "Closing block failed", "err", err, "block", ulid) + db.logger.Warn("Closing block failed", "err", err, "block", ulid) } } @@ -1805,7 +1805,7 @@ func (db *DB) deleteBlocks(blocks map[ulid.ULID]*Block) error { if err := os.RemoveAll(tmpToDelete); err != nil { return fmt.Errorf("delete obsolete block %s: %w", ulid, err) } - level.Info(db.logger).Log("msg", "Deleting obsolete block", "block", ulid) + db.logger.Info("Deleting obsolete block", "block", ulid) } return nil @@ -1973,7 +1973,7 @@ func (db *DB) DisableCompactions() { defer db.autoCompactMtx.Unlock() db.autoCompact = false - level.Info(db.logger).Log("msg", "Compactions disabled") + db.logger.Info("Compactions disabled") } // EnableCompactions enables auto compactions. 
@@ -1982,7 +1982,7 @@ func (db *DB) EnableCompactions() { defer db.autoCompactMtx.Unlock() db.autoCompact = true - level.Info(db.logger).Log("msg", "Compactions enabled") + db.logger.Info("Compactions enabled") } func (db *DB) generateCompactionDelay() time.Duration { @@ -2012,7 +2012,7 @@ func (db *DB) Snapshot(dir string, withHead bool) error { defer db.mtx.RUnlock() for _, b := range db.blocks { - level.Info(db.logger).Log("msg", "Snapshotting block", "block", b) + db.logger.Info("Snapshotting block", "block", b) if err := b.Snapshot(dir); err != nil { return fmt.Errorf("error snapshotting block: %s: %w", b.Dir(), err) @@ -2273,7 +2273,7 @@ func (db *DB) CleanTombstones() (err error) { for _, uid := range uids { dir := filepath.Join(db.Dir(), uid.String()) if err := os.RemoveAll(dir); err != nil { - level.Error(db.logger).Log("msg", "failed to delete block after failed `CleanTombstones`", "dir", dir, "err", err) + db.logger.Error("failed to delete block after failed `CleanTombstones`", "dir", dir, "err", err) } } if err != nil { diff --git a/tsdb/db_test.go b/tsdb/db_test.go index ef96377483..8c216956d6 100644 --- a/tsdb/db_test.go +++ b/tsdb/db_test.go @@ -20,6 +20,7 @@ import ( "flag" "fmt" "hash/crc32" + "log/slog" "math" "math/rand" "os" @@ -32,10 +33,10 @@ import ( "testing" "time" - "github.com/go-kit/log" "github.com/oklog/ulid" "github.com/prometheus/client_golang/prometheus" prom_testutil "github.com/prometheus/client_golang/prometheus/testutil" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "go.uber.org/atomic" "go.uber.org/goleak" @@ -1126,7 +1127,7 @@ func testWALReplayRaceOnSamplesLoggedBeforeSeries(t *testing.T, numSamplesBefore require.NoError(t, db.Close()) // Reopen the DB, replaying the WAL. 
- reopenDB, err := Open(db.Dir(), log.NewLogfmtLogger(os.Stderr), nil, nil, nil) + reopenDB, err := Open(db.Dir(), promslog.New(&promslog.Config{}), nil, nil, nil) require.NoError(t, err) t.Cleanup(func() { require.NoError(t, reopenDB.Close()) @@ -1595,7 +1596,7 @@ func TestSizeRetention(t *testing.T) { // Create a WAL checkpoint, and compare sizes. first, last, err := wlog.Segments(db.Head().wal.Dir()) require.NoError(t, err) - _, err = wlog.Checkpoint(log.NewNopLogger(), db.Head().wal, first, last-1, func(x chunks.HeadSeriesRef) bool { return false }, 0) + _, err = wlog.Checkpoint(promslog.NewNopLogger(), db.Head().wal, first, last-1, func(x chunks.HeadSeriesRef) bool { return false }, 0) require.NoError(t, err) blockSize = int64(prom_testutil.ToFloat64(db.metrics.blocksBytes)) // Use the actual internal metrics. walSize, err = db.Head().wal.Size() @@ -2336,7 +2337,7 @@ func TestCorrectNumTombstones(t *testing.T) { // This ensures that a snapshot that includes the head and creates a block with a custom time range // will not overlap with the first block created by the next compaction. func TestBlockRanges(t *testing.T) { - logger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr)) + logger := promslog.New(&promslog.Config{}) ctx := context.Background() dir := t.TempDir() @@ -2421,7 +2422,7 @@ func TestBlockRanges(t *testing.T) { func TestDBReadOnly(t *testing.T) { var ( dbDir string - logger = log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr)) + logger = promslog.New(&promslog.Config{}) expBlocks []*Block expBlock *Block expSeries map[string][]chunks.Sample @@ -2539,7 +2540,7 @@ func TestDBReadOnly(t *testing.T) { // all api methods return an ErrClosed. func TestDBReadOnlyClosing(t *testing.T) { sandboxDir := t.TempDir() - db, err := OpenDBReadOnly(t.TempDir(), sandboxDir, log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr))) + db, err := OpenDBReadOnly(t.TempDir(), sandboxDir, promslog.New(&promslog.Config{})) require.NoError(t, err) // The sandboxDir was there. 
require.DirExists(t, db.sandboxDir) @@ -2556,7 +2557,7 @@ func TestDBReadOnlyClosing(t *testing.T) { func TestDBReadOnly_FlushWAL(t *testing.T) { var ( dbDir string - logger = log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr)) + logger = promslog.New(&promslog.Config{}) err error maxt int ctx = context.Background() @@ -3101,7 +3102,7 @@ func TestCompactHead(t *testing.T) { WALCompression: wlog.CompressionSnappy, } - db, err := Open(dbDir, log.NewNopLogger(), prometheus.NewRegistry(), tsdbCfg, nil) + db, err := Open(dbDir, promslog.NewNopLogger(), prometheus.NewRegistry(), tsdbCfg, nil) require.NoError(t, err) ctx := context.Background() app := db.Appender(ctx) @@ -3122,7 +3123,7 @@ func TestCompactHead(t *testing.T) { // Delete everything but the new block and // reopen the db to query it to ensure it includes the head data. require.NoError(t, deleteNonBlocks(db.Dir())) - db, err = Open(dbDir, log.NewNopLogger(), prometheus.NewRegistry(), tsdbCfg, nil) + db, err = Open(dbDir, promslog.NewNopLogger(), prometheus.NewRegistry(), tsdbCfg, nil) require.NoError(t, err) require.Len(t, db.Blocks(), 1) require.Equal(t, int64(maxt), db.Head().MinTime()) @@ -3149,7 +3150,7 @@ func TestCompactHead(t *testing.T) { // TestCompactHeadWithDeletion tests https://github.com/prometheus/prometheus/issues/11585. func TestCompactHeadWithDeletion(t *testing.T) { - db, err := Open(t.TempDir(), log.NewNopLogger(), prometheus.NewRegistry(), nil, nil) + db, err := Open(t.TempDir(), promslog.NewNopLogger(), prometheus.NewRegistry(), nil, nil) require.NoError(t, err) ctx := context.Background() @@ -3262,7 +3263,7 @@ func TestOpen_VariousBlockStates(t *testing.T) { // Regression test: Already removed parent can be still in list, which was causing Open errors. 
m.Compaction.Parents = append(m.Compaction.Parents, BlockDesc{ULID: ulid.MustParse(filepath.Base(compacted))}) m.Compaction.Parents = append(m.Compaction.Parents, BlockDesc{ULID: ulid.MustParse(filepath.Base(compacted))}) - _, err = writeMetaFile(log.NewLogfmtLogger(os.Stderr), dir, m) + _, err = writeMetaFile(promslog.New(&promslog.Config{}), dir, m) require.NoError(t, err) } tmpCheckpointDir := path.Join(tmpDir, "wal/checkpoint.00000001.tmp") @@ -3274,7 +3275,7 @@ func TestOpen_VariousBlockStates(t *testing.T) { opts := DefaultOptions() opts.RetentionDuration = 0 - db, err := Open(tmpDir, log.NewLogfmtLogger(os.Stderr), nil, opts, nil) + db, err := Open(tmpDir, promslog.New(&promslog.Config{}), nil, opts, nil) require.NoError(t, err) loadedBlocks := db.Blocks() @@ -3318,7 +3319,7 @@ func TestOneCheckpointPerCompactCall(t *testing.T) { tmpDir := t.TempDir() ctx := context.Background() - db, err := Open(tmpDir, log.NewNopLogger(), prometheus.NewRegistry(), tsdbCfg, nil) + db, err := Open(tmpDir, promslog.NewNopLogger(), prometheus.NewRegistry(), tsdbCfg, nil) require.NoError(t, err) t.Cleanup(func() { require.NoError(t, db.Close()) @@ -3380,7 +3381,7 @@ func TestOneCheckpointPerCompactCall(t *testing.T) { createBlock(t, db.dir, genSeries(1, 1, newBlockMint, newBlockMaxt)) - db, err = Open(db.dir, log.NewNopLogger(), prometheus.NewRegistry(), tsdbCfg, nil) + db, err = Open(db.dir, promslog.NewNopLogger(), prometheus.NewRegistry(), tsdbCfg, nil) require.NoError(t, err) db.DisableCompactions() @@ -3429,7 +3430,7 @@ func TestNoPanicOnTSDBOpenError(t *testing.T) { tmpdir := t.TempDir() // Taking the lock will cause a TSDB startup error. 
- l, err := tsdbutil.NewDirLocker(tmpdir, "tsdb", log.NewNopLogger(), nil) + l, err := tsdbutil.NewDirLocker(tmpdir, "tsdb", promslog.NewNopLogger(), nil) require.NoError(t, err) require.NoError(t, l.Lock()) @@ -4584,7 +4585,7 @@ func TestMetadataCheckpointingOnlyKeepsLatestEntry(t *testing.T) { keep := func(id chunks.HeadSeriesRef) bool { return id != 3 } - _, err = wlog.Checkpoint(log.NewNopLogger(), w, first, last-1, keep, 0) + _, err = wlog.Checkpoint(promslog.NewNopLogger(), w, first, last-1, keep, 0) require.NoError(t, err) // Confirm there's been a checkpoint. @@ -6553,7 +6554,7 @@ func testWBLAndMmapReplay(t *testing.T, scenario sampleTypeScenario) { resetMmapToOriginal() // We neet to reset because new duplicate chunks can be written above. // Removing m-map markers in WBL by rewriting it. - newWbl, err := wlog.New(log.NewNopLogger(), nil, filepath.Join(t.TempDir(), "new_wbl"), wlog.CompressionNone) + newWbl, err := wlog.New(promslog.NewNopLogger(), nil, filepath.Join(t.TempDir(), "new_wbl"), wlog.CompressionNone) require.NoError(t, err) sr, err := wlog.NewSegmentsReader(originalWblDir) require.NoError(t, err) @@ -8730,7 +8731,7 @@ func TestNewCompactorFunc(t *testing.T) { opts := DefaultOptions() block1 := ulid.MustNew(1, nil) block2 := ulid.MustNew(2, nil) - opts.NewCompactorFunc = func(ctx context.Context, r prometheus.Registerer, l log.Logger, ranges []int64, pool chunkenc.Pool, opts *Options) (Compactor, error) { + opts.NewCompactorFunc = func(ctx context.Context, r prometheus.Registerer, l *slog.Logger, ranges []int64, pool chunkenc.Pool, opts *Options) (Compactor, error) { return &mockCompactorFn{ planFn: func() ([]string, error) { return []string{block1.String(), block2.String()}, nil diff --git a/tsdb/head.go b/tsdb/head.go index f469e5e345..2963d781d0 100644 --- a/tsdb/head.go +++ b/tsdb/head.go @@ -18,6 +18,7 @@ import ( "errors" "fmt" "io" + "log/slog" "math" "path/filepath" "runtime" @@ -25,12 +26,11 @@ import ( "sync" "time" - 
"github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/oklog/ulid" "go.uber.org/atomic" "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/common/promslog" "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/model/exemplar" @@ -84,7 +84,7 @@ type Head struct { wal, wbl *wlog.WL exemplarMetrics *ExemplarMetrics exemplars ExemplarStorage - logger log.Logger + logger *slog.Logger appendPool zeropool.Pool[[]record.RefSample] exemplarsPool zeropool.Pool[[]exemplarWithSeriesRef] histogramsPool zeropool.Pool[[]record.RefHistogramSample] @@ -227,10 +227,10 @@ type SeriesLifecycleCallback interface { } // NewHead opens the head block in dir. -func NewHead(r prometheus.Registerer, l log.Logger, wal, wbl *wlog.WL, opts *HeadOptions, stats *HeadStats) (*Head, error) { +func NewHead(r prometheus.Registerer, l *slog.Logger, wal, wbl *wlog.WL, opts *HeadOptions, stats *HeadStats) (*Head, error) { var err error if l == nil { - l = log.NewNopLogger() + l = promslog.NewNopLogger() } if opts.OutOfOrderTimeWindow.Load() < 0 { @@ -566,7 +566,7 @@ func newHeadMetrics(h *Head, r prometheus.Registerer) *headMetrics { }, func() float64 { val, err := h.chunkDiskMapper.Size() if err != nil { - level.Error(h.logger).Log("msg", "Failed to calculate size of \"chunks_head\" dir", + h.logger.Error("Failed to calculate size of \"chunks_head\" dir", "err", err.Error()) } return float64(val) @@ -629,7 +629,7 @@ func (h *Head) Init(minValidTime int64) error { } }() - level.Info(h.logger).Log("msg", "Replaying on-disk memory mappable chunks if any") + h.logger.Info("Replaying on-disk memory mappable chunks if any") start := time.Now() snapIdx, snapOffset := -1, 0 @@ -638,7 +638,7 @@ func (h *Head) Init(minValidTime int64) error { snapshotLoaded := false var chunkSnapshotLoadDuration time.Duration if h.opts.EnableMemorySnapshotOnShutdown { - level.Info(h.logger).Log("msg", "Chunk snapshot is enabled, replaying from the snapshot") + 
h.logger.Info("Chunk snapshot is enabled, replaying from the snapshot") // If there are any WAL files, there should be at least one WAL file with an index that is current or newer // than the snapshot index. If the WAL index is behind the snapshot index somehow, the snapshot is assumed // to be outdated. @@ -651,14 +651,14 @@ func (h *Head) Init(minValidTime int64) error { _, idx, _, err := LastChunkSnapshot(h.opts.ChunkDirRoot) if err != nil && !errors.Is(err, record.ErrNotFound) { - level.Error(h.logger).Log("msg", "Could not find last snapshot", "err", err) + h.logger.Error("Could not find last snapshot", "err", err) } if err == nil && endAt < idx { loadSnapshot = false - level.Warn(h.logger).Log("msg", "Last WAL file is behind snapshot, removing snapshots") + h.logger.Warn("Last WAL file is behind snapshot, removing snapshots") if err := DeleteChunkSnapshots(h.opts.ChunkDirRoot, math.MaxInt, math.MaxInt); err != nil { - level.Error(h.logger).Log("msg", "Error while deleting snapshot directories", "err", err) + h.logger.Error("Error while deleting snapshot directories", "err", err) } } } @@ -668,14 +668,14 @@ func (h *Head) Init(minValidTime int64) error { if err == nil { snapshotLoaded = true chunkSnapshotLoadDuration = time.Since(start) - level.Info(h.logger).Log("msg", "Chunk snapshot loading time", "duration", chunkSnapshotLoadDuration.String()) + h.logger.Info("Chunk snapshot loading time", "duration", chunkSnapshotLoadDuration.String()) } if err != nil { snapIdx, snapOffset = -1, 0 refSeries = make(map[chunks.HeadSeriesRef]*memSeries) h.metrics.snapshotReplayErrorTotal.Inc() - level.Error(h.logger).Log("msg", "Failed to load chunk snapshot", "err", err) + h.logger.Error("Failed to load chunk snapshot", "err", err) // We clear the partially loaded data to replay fresh from the WAL. 
if err := h.resetInMemoryState(); err != nil { return err @@ -699,7 +699,7 @@ func (h *Head) Init(minValidTime int64) error { mmappedChunks, oooMmappedChunks, lastMmapRef, err = h.loadMmappedChunks(refSeries) if err != nil { // TODO(codesome): clear out all m-map chunks here for refSeries. - level.Error(h.logger).Log("msg", "Loading on-disk chunks failed", "err", err) + h.logger.Error("Loading on-disk chunks failed", "err", err) var cerr *chunks.CorruptionErr if errors.As(err, &cerr) { h.metrics.mmapChunkCorruptionTotal.Inc() @@ -716,15 +716,15 @@ func (h *Head) Init(minValidTime int64) error { } } mmapChunkReplayDuration = time.Since(mmapChunkReplayStart) - level.Info(h.logger).Log("msg", "On-disk memory mappable chunks replay completed", "duration", mmapChunkReplayDuration.String()) + h.logger.Info("On-disk memory mappable chunks replay completed", "duration", mmapChunkReplayDuration.String()) } if h.wal == nil { - level.Info(h.logger).Log("msg", "WAL not found") + h.logger.Info("WAL not found") return nil } - level.Info(h.logger).Log("msg", "Replaying WAL, this may take a while") + h.logger.Info("Replaying WAL, this may take a while") checkpointReplayStart := time.Now() // Backfill the checkpoint first if it exists. 
@@ -750,7 +750,7 @@ func (h *Head) Init(minValidTime int64) error { } defer func() { if err := sr.Close(); err != nil { - level.Warn(h.logger).Log("msg", "Error while closing the wal segments reader", "err", err) + h.logger.Warn("Error while closing the wal segments reader", "err", err) } }() @@ -761,7 +761,7 @@ func (h *Head) Init(minValidTime int64) error { } h.updateWALReplayStatusRead(startFrom) startFrom++ - level.Info(h.logger).Log("msg", "WAL checkpoint loaded") + h.logger.Info("WAL checkpoint loaded") } checkpointReplayDuration := time.Since(checkpointReplayStart) @@ -791,12 +791,12 @@ func (h *Head) Init(minValidTime int64) error { } err = h.loadWAL(wlog.NewReader(sr), syms, multiRef, mmappedChunks, oooMmappedChunks) if err := sr.Close(); err != nil { - level.Warn(h.logger).Log("msg", "Error while closing the wal segments reader", "err", err) + h.logger.Warn("Error while closing the wal segments reader", "err", err) } if err != nil { return err } - level.Info(h.logger).Log("msg", "WAL segment loaded", "segment", i, "maxSegment", endAt) + h.logger.Info("WAL segment loaded", "segment", i, "maxSegment", endAt) h.updateWALReplayStatusRead(i) } walReplayDuration := time.Since(walReplayStart) @@ -819,12 +819,12 @@ func (h *Head) Init(minValidTime int64) error { sr := wlog.NewSegmentBufReader(s) err = h.loadWBL(wlog.NewReader(sr), syms, multiRef, lastMmapRef) if err := sr.Close(); err != nil { - level.Warn(h.logger).Log("msg", "Error while closing the wbl segments reader", "err", err) + h.logger.Warn("Error while closing the wbl segments reader", "err", err) } if err != nil { return &errLoadWbl{err} } - level.Info(h.logger).Log("msg", "WBL segment loaded", "segment", i, "maxSegment", endAt) + h.logger.Info("WBL segment loaded", "segment", i, "maxSegment", endAt) h.updateWALReplayStatusRead(i) } } @@ -833,8 +833,8 @@ func (h *Head) Init(minValidTime int64) error { totalReplayDuration := time.Since(start) 
h.metrics.dataTotalReplayDuration.Set(totalReplayDuration.Seconds()) - level.Info(h.logger).Log( - "msg", "WAL replay completed", + h.logger.Info( + "WAL replay completed", "checkpoint_replay_duration", checkpointReplayDuration.String(), "wal_replay_duration", walReplayDuration.String(), "wbl_replay_duration", wblReplayDuration.String(), @@ -944,28 +944,28 @@ func (h *Head) loadMmappedChunks(refSeries map[chunks.HeadSeriesRef]*memSeries) // removeCorruptedMmappedChunks attempts to delete the corrupted mmapped chunks and if it fails, it clears all the previously // loaded mmapped chunks. func (h *Head) removeCorruptedMmappedChunks(err error) (map[chunks.HeadSeriesRef][]*mmappedChunk, map[chunks.HeadSeriesRef][]*mmappedChunk, chunks.ChunkDiskMapperRef, error) { - level.Info(h.logger).Log("msg", "Deleting mmapped chunk files") + h.logger.Info("Deleting mmapped chunk files") // We never want to preserve the in-memory series from snapshots if we are repairing m-map chunks. if err := h.resetInMemoryState(); err != nil { return map[chunks.HeadSeriesRef][]*mmappedChunk{}, map[chunks.HeadSeriesRef][]*mmappedChunk{}, 0, err } - level.Info(h.logger).Log("msg", "Deleting mmapped chunk files") + h.logger.Info("Deleting mmapped chunk files") if err := h.chunkDiskMapper.DeleteCorrupted(err); err != nil { - level.Info(h.logger).Log("msg", "Deletion of corrupted mmap chunk files failed, discarding chunk files completely", "err", err) + h.logger.Info("Deletion of corrupted mmap chunk files failed, discarding chunk files completely", "err", err) if err := h.chunkDiskMapper.Truncate(math.MaxUint32); err != nil { - level.Error(h.logger).Log("msg", "Deletion of all mmap chunk files failed", "err", err) + h.logger.Error("Deletion of all mmap chunk files failed", "err", err) } return map[chunks.HeadSeriesRef][]*mmappedChunk{}, map[chunks.HeadSeriesRef][]*mmappedChunk{}, 0, nil } - level.Info(h.logger).Log("msg", "Deletion of mmap chunk files successful, reattempting m-mapping the on-disk 
chunks") + h.logger.Info("Deletion of mmap chunk files successful, reattempting m-mapping the on-disk chunks") mmappedChunks, oooMmappedChunks, lastRef, err := h.loadMmappedChunks(make(map[chunks.HeadSeriesRef]*memSeries)) if err != nil { - level.Error(h.logger).Log("msg", "Loading on-disk chunks failed, discarding chunk files completely", "err", err) + h.logger.Error("Loading on-disk chunks failed, discarding chunk files completely", "err", err) if err := h.chunkDiskMapper.Truncate(math.MaxUint32); err != nil { - level.Error(h.logger).Log("msg", "Deletion of all mmap chunk files failed after failed loading", "err", err) + h.logger.Error("Deletion of all mmap chunk files failed after failed loading", "err", err) } mmappedChunks = map[chunks.HeadSeriesRef][]*mmappedChunk{} } @@ -1000,7 +1000,7 @@ func (h *Head) ApplyConfig(cfg *config.Config, wbl *wlog.WL) { } migrated := h.exemplars.(*CircularExemplarStorage).Resize(newSize) - level.Info(h.logger).Log("msg", "Exemplar storage resized", "from", prevSize, "to", newSize, "migrated", migrated) + h.logger.Info("Exemplar storage resized", "from", prevSize, "to", newSize, "migrated", migrated) } // SetOutOfOrderTimeWindow updates the out of order related parameters. @@ -1311,7 +1311,7 @@ func (h *Head) truncateWAL(mint int64) error { // If truncating fails, we'll just try again at the next checkpoint. // Leftover segments will just be ignored in the future if there's a checkpoint // that supersedes them. - level.Error(h.logger).Log("msg", "truncating segments failed", "err", err) + h.logger.Error("truncating segments failed", "err", err) } // The checkpoint is written and segments before it is truncated, so we no @@ -1329,12 +1329,12 @@ func (h *Head) truncateWAL(mint int64) error { // Leftover old checkpoints do not cause problems down the line beyond // occupying disk space. // They will just be ignored since a higher checkpoint exists. 
- level.Error(h.logger).Log("msg", "delete old checkpoints", "err", err) + h.logger.Error("delete old checkpoints", "err", err) h.metrics.checkpointDeleteFail.Inc() } h.metrics.walTruncateDuration.Observe(time.Since(start).Seconds()) - level.Info(h.logger).Log("msg", "WAL checkpoint complete", + h.logger.Info("WAL checkpoint complete", "first", first, "last", last, "duration", time.Since(start)) return nil @@ -1372,7 +1372,7 @@ func (h *Head) truncateSeriesAndChunkDiskMapper(caller string) error { start := time.Now() headMaxt := h.MaxTime() actualMint, minOOOTime, minMmapFile := h.gc() - level.Info(h.logger).Log("msg", "Head GC completed", "caller", caller, "duration", time.Since(start)) + h.logger.Info("Head GC completed", "caller", caller, "duration", time.Since(start)) h.metrics.gcDuration.Observe(time.Since(start).Seconds()) if actualMint > h.minTime.Load() { @@ -1524,7 +1524,7 @@ func (h *Head) Delete(ctx context.Context, mint, maxt int64, ms ...*labels.Match series := h.series.getByID(chunks.HeadSeriesRef(p.At())) if series == nil { - level.Debug(h.logger).Log("msg", "Series not found in Head.Delete") + h.logger.Debug("Series not found in Head.Delete") continue } diff --git a/tsdb/head_append.go b/tsdb/head_append.go index 10fb17809b..b385758cac 100644 --- a/tsdb/head_append.go +++ b/tsdb/head_append.go @@ -17,11 +17,9 @@ import ( "context" "errors" "fmt" + "log/slog" "math" - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" @@ -1009,7 +1007,7 @@ func (a *headAppender) Commit() (err error) { if errors.Is(err, storage.ErrOutOfOrderExemplar) { continue } - level.Debug(a.head.logger).Log("msg", "Unknown error while adding exemplar", "err", err) + a.head.logger.Debug("Unknown error while adding exemplar", "err", err) } } @@ -1421,14 +1419,14 @@ func (a *headAppender) Commit() (err error) { // until we have 
found what samples become OOO. We can try having a metric for this failure. // Returning the error here is not correct because we have already put the samples into the memory, // hence the append/insert was a success. - level.Error(a.head.logger).Log("msg", "Failed to log out of order samples into the WAL", "err", err) + a.head.logger.Error("Failed to log out of order samples into the WAL", "err", err) } } return nil } // insert is like append, except it inserts. Used for OOO samples. -func (s *memSeries) insert(t int64, v float64, h *histogram.Histogram, fh *histogram.FloatHistogram, chunkDiskMapper *chunks.ChunkDiskMapper, oooCapMax int64, logger log.Logger) (inserted, chunkCreated bool, mmapRefs []chunks.ChunkDiskMapperRef) { +func (s *memSeries) insert(t int64, v float64, h *histogram.Histogram, fh *histogram.FloatHistogram, chunkDiskMapper *chunks.ChunkDiskMapper, oooCapMax int64, logger *slog.Logger) (inserted, chunkCreated bool, mmapRefs []chunks.ChunkDiskMapperRef) { if s.ooo == nil { s.ooo = &memSeriesOOOFields{} } @@ -1790,7 +1788,7 @@ func (s *memSeries) cutNewHeadChunk(mint int64, e chunkenc.Encoding, chunkRange // cutNewOOOHeadChunk cuts a new OOO chunk and m-maps the old chunk. // The caller must ensure that s is locked and s.ooo is not nil. -func (s *memSeries) cutNewOOOHeadChunk(mint int64, chunkDiskMapper *chunks.ChunkDiskMapper, logger log.Logger) (*oooHeadChunk, []chunks.ChunkDiskMapperRef) { +func (s *memSeries) cutNewOOOHeadChunk(mint int64, chunkDiskMapper *chunks.ChunkDiskMapper, logger *slog.Logger) (*oooHeadChunk, []chunks.ChunkDiskMapperRef) { ref := s.mmapCurrentOOOHeadChunk(chunkDiskMapper, logger) s.ooo.oooHeadChunk = &oooHeadChunk{ @@ -1803,7 +1801,7 @@ func (s *memSeries) cutNewOOOHeadChunk(mint int64, chunkDiskMapper *chunks.Chunk } // s must be locked when calling. 
-func (s *memSeries) mmapCurrentOOOHeadChunk(chunkDiskMapper *chunks.ChunkDiskMapper, logger log.Logger) []chunks.ChunkDiskMapperRef { +func (s *memSeries) mmapCurrentOOOHeadChunk(chunkDiskMapper *chunks.ChunkDiskMapper, logger *slog.Logger) []chunks.ChunkDiskMapperRef { if s.ooo == nil || s.ooo.oooHeadChunk == nil { // OOO is not enabled or there is no head chunk, so nothing to m-map here. return nil @@ -1816,7 +1814,7 @@ func (s *memSeries) mmapCurrentOOOHeadChunk(chunkDiskMapper *chunks.ChunkDiskMap chunkRefs := make([]chunks.ChunkDiskMapperRef, 0, len(chks)) for _, memchunk := range chks { if len(s.ooo.oooMmappedChunks) >= (oooChunkIDMask - 1) { - level.Error(logger).Log("msg", "Too many OOO chunks, dropping data", "series", s.lset.String()) + logger.Error("Too many OOO chunks, dropping data", "series", s.lset.String()) break } chunkRef := chunkDiskMapper.WriteChunk(s.ref, memchunk.minTime, memchunk.maxTime, memchunk.chunk, true, handleChunkWriteError) diff --git a/tsdb/head_dedupelabels.go b/tsdb/head_dedupelabels.go index a16d907261..a75f337224 100644 --- a/tsdb/head_dedupelabels.go +++ b/tsdb/head_dedupelabels.go @@ -16,8 +16,7 @@ package tsdb import ( - "github.com/go-kit/log" - "github.com/go-kit/log/level" + "log/slog" "github.com/prometheus/prometheus/model/labels" ) @@ -31,8 +30,8 @@ func (s *memSeries) labels() labels.Labels { // RebuildSymbolTable goes through all the series in h, build a SymbolTable with all names and values, // replace each series' Labels with one using that SymbolTable. 
-func (h *Head) RebuildSymbolTable(logger log.Logger) *labels.SymbolTable { - level.Info(logger).Log("msg", "RebuildSymbolTable starting") +func (h *Head) RebuildSymbolTable(logger *slog.Logger) *labels.SymbolTable { + logger.Info("RebuildSymbolTable starting") st := labels.NewSymbolTable() builder := labels.NewScratchBuilderWithSymbolTable(st, 0) rebuildLabels := func(lbls labels.Labels) labels.Labels { @@ -66,7 +65,7 @@ func (h *Head) RebuildSymbolTable(logger log.Logger) *labels.SymbolTable { if e, ok := h.exemplars.(withReset); ok { e.ResetSymbolTable(st) } - level.Info(logger).Log("msg", "RebuildSymbolTable finished", "size", st.Len()) + logger.Info("RebuildSymbolTable finished", "size", st.Len()) return st } diff --git a/tsdb/head_other.go b/tsdb/head_other.go index fea91530dc..c73872c12e 100644 --- a/tsdb/head_other.go +++ b/tsdb/head_other.go @@ -16,7 +16,7 @@ package tsdb import ( - "github.com/go-kit/log" + "log/slog" "github.com/prometheus/prometheus/model/labels" ) @@ -27,6 +27,6 @@ func (s *memSeries) labels() labels.Labels { } // RebuildSymbolTable is a no-op when not using dedupelabels. 
-func (h *Head) RebuildSymbolTable(logger log.Logger) *labels.SymbolTable { +func (h *Head) RebuildSymbolTable(logger *slog.Logger) *labels.SymbolTable { return nil } diff --git a/tsdb/head_read.go b/tsdb/head_read.go index d81ffbb6a0..29adc3ee74 100644 --- a/tsdb/head_read.go +++ b/tsdb/head_read.go @@ -21,8 +21,6 @@ import ( "slices" "sync" - "github.com/go-kit/log/level" - "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/chunkenc" @@ -132,7 +130,7 @@ func (h *headIndexReader) SortedPostings(p index.Postings) index.Postings { for p.Next() { s := h.head.series.getByID(chunks.HeadSeriesRef(p.At())) if s == nil { - level.Debug(h.head.logger).Log("msg", "Looked up series not found") + h.head.logger.Debug("Looked up series not found") } else { series = append(series, s) } @@ -165,7 +163,7 @@ func (h *headIndexReader) ShardedPostings(p index.Postings, shardIndex, shardCou for p.Next() { s := h.head.series.getByID(chunks.HeadSeriesRef(p.At())) if s == nil { - level.Debug(h.head.logger).Log("msg", "Looked up series not found") + h.head.logger.Debug("Looked up series not found") continue } diff --git a/tsdb/head_wal.go b/tsdb/head_wal.go index 14a1d0d474..8103926dc6 100644 --- a/tsdb/head_wal.go +++ b/tsdb/head_wal.go @@ -24,7 +24,6 @@ import ( "sync" "time" - "github.com/go-kit/log/level" "go.uber.org/atomic" "github.com/prometheus/prometheus/model/exemplar" @@ -128,7 +127,7 @@ func (h *Head) loadWAL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch // replaying the WAL, so lets just log the error if it's not that type. 
err = h.exemplars.AddExemplar(ms.labels(), exemplar.Exemplar{Ts: e.T, Value: e.V, Labels: e.Labels}) if err != nil && errors.Is(err, storage.ErrOutOfOrderExemplar) { - level.Warn(h.logger).Log("msg", "Unexpected error when replaying WAL on exemplar record", "err", err) + h.logger.Warn("Unexpected error when replaying WAL on exemplar record", "err", err) } } }(exemplarsInput) @@ -421,8 +420,8 @@ Outer: } if unknownRefs.Load()+unknownExemplarRefs.Load()+unknownHistogramRefs.Load()+unknownMetadataRefs.Load() > 0 { - level.Warn(h.logger).Log( - "msg", "Unknown series references", + h.logger.Warn( + "Unknown series references", "samples", unknownRefs.Load(), "exemplars", unknownExemplarRefs.Load(), "histograms", unknownHistogramRefs.Load(), @@ -430,7 +429,7 @@ Outer: ) } if count := mmapOverlappingChunks.Load(); count > 0 { - level.Info(h.logger).Log("msg", "Overlapping m-map chunks on duplicate series records", "count", count) + h.logger.Info("Overlapping m-map chunks on duplicate series records", "count", count) } return nil } @@ -446,8 +445,8 @@ func (h *Head) resetSeriesWithMMappedChunks(mSeries *memSeries, mmc, oooMmc []*m mmc[0].minTime, mmc[len(mmc)-1].maxTime, ) { - level.Debug(h.logger).Log( - "msg", "M-mapped chunks overlap on a duplicate series record", + h.logger.Debug( + "M-mapped chunks overlap on a duplicate series record", "series", mSeries.labels().String(), "oldref", mSeries.ref, "oldmint", mSeries.mmappedChunks[0].minTime, @@ -911,7 +910,7 @@ func (h *Head) loadWBL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch } if unknownRefs.Load() > 0 || mmapMarkerUnknownRefs.Load() > 0 { - level.Warn(h.logger).Log("msg", "Unknown series references for ooo WAL replay", "samples", unknownRefs.Load(), "mmap_markers", mmapMarkerUnknownRefs.Load()) + h.logger.Warn("Unknown series references for ooo WAL replay", "samples", unknownRefs.Load(), "mmap_markers", mmapMarkerUnknownRefs.Load()) } return nil } @@ -1212,7 +1211,7 @@ const chunkSnapshotPrefix = 
"chunk_snapshot." func (h *Head) ChunkSnapshot() (*ChunkSnapshotStats, error) { if h.wal == nil { // If we are not storing any WAL, does not make sense to take a snapshot too. - level.Warn(h.logger).Log("msg", "skipping chunk snapshotting as WAL is disabled") + h.logger.Warn("skipping chunk snapshotting as WAL is disabled") return &ChunkSnapshotStats{}, nil } h.chunkSnapshotMtx.Lock() @@ -1361,7 +1360,7 @@ func (h *Head) ChunkSnapshot() (*ChunkSnapshotStats, error) { // Leftover old chunk snapshots do not cause problems down the line beyond // occupying disk space. // They will just be ignored since a higher chunk snapshot exists. - level.Error(h.logger).Log("msg", "delete old chunk snapshots", "err", err) + h.logger.Error("delete old chunk snapshots", "err", err) } return stats, nil } @@ -1371,12 +1370,12 @@ func chunkSnapshotDir(wlast, woffset int) string { } func (h *Head) performChunkSnapshot() error { - level.Info(h.logger).Log("msg", "creating chunk snapshot") + h.logger.Info("creating chunk snapshot") startTime := time.Now() stats, err := h.ChunkSnapshot() elapsed := time.Since(startTime) if err == nil { - level.Info(h.logger).Log("msg", "chunk snapshot complete", "duration", elapsed.String(), "num_series", stats.TotalSeries, "dir", stats.Dir) + h.logger.Info("chunk snapshot complete", "duration", elapsed.String(), "num_series", stats.TotalSeries, "dir", stats.Dir) } if err != nil { return fmt.Errorf("chunk snapshot: %w", err) @@ -1491,7 +1490,7 @@ func (h *Head) loadChunkSnapshot() (int, int, map[chunks.HeadSeriesRef]*memSerie } defer func() { if err := sr.Close(); err != nil { - level.Warn(h.logger).Log("msg", "error while closing the wal segments reader", "err", err) + h.logger.Warn("error while closing the wal segments reader", "err", err) } }() @@ -1680,9 +1679,9 @@ Outer: } elapsed := time.Since(start) - level.Info(h.logger).Log("msg", "chunk snapshot loaded", "dir", dir, "num_series", numSeries, "duration", elapsed.String()) + h.logger.Info("chunk 
snapshot loaded", "dir", dir, "num_series", numSeries, "duration", elapsed.String()) if unknownRefs > 0 { - level.Warn(h.logger).Log("msg", "unknown series references during chunk snapshot replay", "count", unknownRefs) + h.logger.Warn("unknown series references during chunk snapshot replay", "count", unknownRefs) } return snapIdx, snapOffset, refSeries, nil diff --git a/tsdb/repair.go b/tsdb/repair.go index 9d2c5738d1..8bdc645b5e 100644 --- a/tsdb/repair.go +++ b/tsdb/repair.go @@ -17,19 +17,17 @@ import ( "encoding/json" "fmt" "io" + "log/slog" "os" "path/filepath" - "github.com/go-kit/log" - "github.com/go-kit/log/level" - tsdb_errors "github.com/prometheus/prometheus/tsdb/errors" "github.com/prometheus/prometheus/tsdb/fileutil" ) // repairBadIndexVersion repairs an issue in index and meta.json persistence introduced in // commit 129773b41a565fde5156301e37f9a87158030443. -func repairBadIndexVersion(logger log.Logger, dir string) error { +func repairBadIndexVersion(logger *slog.Logger, dir string) error { // All blocks written by Prometheus 2.1 with a meta.json version of 2 are affected. // We must actually set the index file version to 2 and revert the meta.json version back to 1. 
dirs, err := blockDirs(dir) @@ -41,7 +39,7 @@ func repairBadIndexVersion(logger log.Logger, dir string) error { defer func() { for _, tmp := range tmpFiles { if err := os.RemoveAll(tmp); err != nil { - level.Error(logger).Log("msg", "remove tmp file", "err", err.Error()) + logger.Error("remove tmp file", "err", err.Error()) } } }() @@ -49,20 +47,20 @@ func repairBadIndexVersion(logger log.Logger, dir string) error { for _, d := range dirs { meta, err := readBogusMetaFile(d) if err != nil { - level.Error(logger).Log("msg", "failed to read meta.json for a block during repair process; skipping", "dir", d, "err", err) + logger.Error("failed to read meta.json for a block during repair process; skipping", "dir", d, "err", err) continue } if meta.Version == metaVersion1 { - level.Info(logger).Log( - "msg", "Found healthy block", + logger.Info( + "Found healthy block", "mint", meta.MinTime, "maxt", meta.MaxTime, "ulid", meta.ULID, ) continue } - level.Info(logger).Log( - "msg", "Fixing broken block", + logger.Info( + "Fixing broken block", "mint", meta.MinTime, "maxt", meta.MaxTime, "ulid", meta.ULID, diff --git a/tsdb/tombstones/tombstones.go b/tsdb/tombstones/tombstones.go index 4cea5005db..dcba298f3b 100644 --- a/tsdb/tombstones/tombstones.go +++ b/tsdb/tombstones/tombstones.go @@ -19,15 +19,13 @@ import ( "fmt" "hash" "hash/crc32" + "log/slog" "math" "os" "path/filepath" "sort" "sync" - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/encoding" tsdb_errors "github.com/prometheus/prometheus/tsdb/errors" @@ -76,7 +74,7 @@ type Reader interface { Close() error } -func WriteFile(logger log.Logger, dir string, tr Reader) (int64, error) { +func WriteFile(logger *slog.Logger, dir string, tr Reader) (int64, error) { path := filepath.Join(dir, TombstonesFilename) tmp := path + ".tmp" hash := newCRC32() @@ -89,11 +87,11 @@ func WriteFile(logger log.Logger, dir string, tr Reader) (int64, 
error) { defer func() { if f != nil { if err := f.Close(); err != nil { - level.Error(logger).Log("msg", "close tmp file", "err", err.Error()) + logger.Error("close tmp file", "err", err.Error()) } } if err := os.RemoveAll(tmp); err != nil { - level.Error(logger).Log("msg", "remove tmp file", "err", err.Error()) + logger.Error("remove tmp file", "err", err.Error()) } }() diff --git a/tsdb/tombstones/tombstones_test.go b/tsdb/tombstones/tombstones_test.go index 36c9f1c1e3..cbf686e4bb 100644 --- a/tsdb/tombstones/tombstones_test.go +++ b/tsdb/tombstones/tombstones_test.go @@ -20,10 +20,11 @@ import ( "testing" "time" - "github.com/go-kit/log" "github.com/stretchr/testify/require" "go.uber.org/goleak" + "github.com/prometheus/common/promslog" + "github.com/prometheus/prometheus/storage" ) @@ -50,7 +51,7 @@ func TestWriteAndReadbackTombstones(t *testing.T) { stones.AddInterval(storage.SeriesRef(ref), dranges...) } - _, err := WriteFile(log.NewNopLogger(), tmpdir, stones) + _, err := WriteFile(promslog.NewNopLogger(), tmpdir, stones) require.NoError(t, err) restr, _, err := ReadTombstones(tmpdir) diff --git a/tsdb/tsdbblockutil.go b/tsdb/tsdbblockutil.go index f7b27c2e08..b49757223f 100644 --- a/tsdb/tsdbblockutil.go +++ b/tsdb/tsdbblockutil.go @@ -16,10 +16,9 @@ package tsdb import ( "context" "fmt" + "log/slog" "path/filepath" - "github.com/go-kit/log" - "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/chunkenc" ) @@ -27,7 +26,7 @@ import ( var ErrInvalidTimes = fmt.Errorf("max time is lesser than min time") // CreateBlock creates a chunkrange block from the samples passed to it, and writes it to disk. 
-func CreateBlock(series []storage.Series, dir string, chunkRange int64, logger log.Logger) (string, error) { +func CreateBlock(series []storage.Series, dir string, chunkRange int64, logger *slog.Logger) (string, error) { if chunkRange == 0 { chunkRange = DefaultBlockDuration } @@ -41,7 +40,7 @@ func CreateBlock(series []storage.Series, dir string, chunkRange int64, logger l } defer func() { if err := w.Close(); err != nil { - logger.Log("err closing blockwriter", err.Error()) + logger.Error("err closing blockwriter", "err", err.Error()) } }() diff --git a/tsdb/tsdbutil/dir_locker.go b/tsdb/tsdbutil/dir_locker.go index fa939879ca..4b69e1f9d6 100644 --- a/tsdb/tsdbutil/dir_locker.go +++ b/tsdb/tsdbutil/dir_locker.go @@ -16,11 +16,10 @@ package tsdbutil import ( "errors" "fmt" + "log/slog" "os" "path/filepath" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" tsdb_errors "github.com/prometheus/prometheus/tsdb/errors" @@ -34,7 +33,7 @@ const ( ) type DirLocker struct { - logger log.Logger + logger *slog.Logger createdCleanly prometheus.Gauge @@ -43,7 +42,7 @@ type DirLocker struct { } // NewDirLocker creates a DirLocker that can obtain an exclusive lock on dir. -func NewDirLocker(dir, subsystem string, l log.Logger, r prometheus.Registerer) (*DirLocker, error) { +func NewDirLocker(dir, subsystem string, l *slog.Logger, r prometheus.Registerer) (*DirLocker, error) { lock := &DirLocker{ logger: l, createdCleanly: prometheus.NewGauge(prometheus.GaugeOpts{ @@ -74,7 +73,7 @@ func (l *DirLocker) Lock() error { } if _, err := os.Stat(l.path); err == nil { - level.Warn(l.logger).Log("msg", "A lockfile from a previous execution already existed. It was replaced", "file", l.path) + l.logger.Warn("A lockfile from a previous execution already existed. 
It was replaced", "file", l.path) l.createdCleanly.Set(lockfileReplaced) } else { diff --git a/tsdb/tsdbutil/dir_locker_test.go b/tsdb/tsdbutil/dir_locker_test.go index fc7d905b2d..65e2761692 100644 --- a/tsdb/tsdbutil/dir_locker_test.go +++ b/tsdb/tsdbutil/dir_locker_test.go @@ -16,15 +16,16 @@ package tsdbutil import ( "testing" - "github.com/go-kit/log" "github.com/stretchr/testify/require" + "github.com/prometheus/common/promslog" + "github.com/prometheus/prometheus/util/testutil" ) func TestLockfile(t *testing.T) { TestDirLockerUsage(t, func(t *testing.T, data string, createLock bool) (*DirLocker, testutil.Closer) { - locker, err := NewDirLocker(data, "tsdbutil", log.NewNopLogger(), nil) + locker, err := NewDirLocker(data, "tsdbutil", promslog.NewNopLogger(), nil) require.NoError(t, err) if createLock { diff --git a/tsdb/tsdbutil/dir_locker_testutil.go b/tsdb/tsdbutil/dir_locker_testutil.go index a4cf5abd68..7228dbafed 100644 --- a/tsdb/tsdbutil/dir_locker_testutil.go +++ b/tsdb/tsdbutil/dir_locker_testutil.go @@ -18,8 +18,8 @@ import ( "os" "testing" - "github.com/go-kit/log" prom_testutil "github.com/prometheus/client_golang/prometheus/testutil" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "github.com/prometheus/prometheus/util/testutil" @@ -68,7 +68,7 @@ func TestDirLockerUsage(t *testing.T, open func(t *testing.T, data string, creat // Test preconditions (file already exists + lockfile option) if c.fileAlreadyExists { - tmpLocker, err := NewDirLocker(tmpdir, "tsdb", log.NewNopLogger(), nil) + tmpLocker, err := NewDirLocker(tmpdir, "tsdb", promslog.NewNopLogger(), nil) require.NoError(t, err) err = os.WriteFile(tmpLocker.path, []byte{}, 0o644) require.NoError(t, err) diff --git a/tsdb/wlog/checkpoint.go b/tsdb/wlog/checkpoint.go index a16cd5fc74..58e11c770e 100644 --- a/tsdb/wlog/checkpoint.go +++ b/tsdb/wlog/checkpoint.go @@ -18,6 +18,7 @@ import ( "errors" "fmt" "io" + "log/slog" "math" "os" "path/filepath" @@ -25,9 
+26,6 @@ import ( "strconv" "strings" - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/tsdb/chunks" tsdb_errors "github.com/prometheus/prometheus/tsdb/errors" @@ -94,11 +92,11 @@ const checkpointPrefix = "checkpoint." // segmented format as the original WAL itself. // This makes it easy to read it through the WAL package and concatenate // it with the original WAL. -func Checkpoint(logger log.Logger, w *WL, from, to int, keep func(id chunks.HeadSeriesRef) bool, mint int64) (*CheckpointStats, error) { +func Checkpoint(logger *slog.Logger, w *WL, from, to int, keep func(id chunks.HeadSeriesRef) bool, mint int64) (*CheckpointStats, error) { stats := &CheckpointStats{} var sgmReader io.ReadCloser - level.Info(logger).Log("msg", "Creating checkpoint", "from_segment", from, "to_segment", to, "mint", mint) + logger.Info("Creating checkpoint", "from_segment", from, "to_segment", to, "mint", mint) { var sgmRange []SegmentRange diff --git a/tsdb/wlog/checkpoint_test.go b/tsdb/wlog/checkpoint_test.go index a9786454de..8ee193f5ac 100644 --- a/tsdb/wlog/checkpoint_test.go +++ b/tsdb/wlog/checkpoint_test.go @@ -23,9 +23,10 @@ import ( "strings" "testing" - "github.com/go-kit/log" "github.com/stretchr/testify/require" + "github.com/prometheus/common/promslog" + "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/tsdb/chunks" @@ -244,7 +245,7 @@ func TestCheckpoint(t *testing.T) { } require.NoError(t, w.Close()) - stats, err := Checkpoint(log.NewNopLogger(), w, 100, 106, func(x chunks.HeadSeriesRef) bool { + stats, err := Checkpoint(promslog.NewNopLogger(), w, 100, 106, func(x chunks.HeadSeriesRef) bool { return x%2 == 0 }, last/2) require.NoError(t, err) @@ -354,7 +355,7 @@ func TestCheckpointNoTmpFolderAfterError(t *testing.T) { require.NoError(t, f.Close()) // Run the checkpoint and since the wlog contains 
corrupt data this should return an error. - _, err = Checkpoint(log.NewNopLogger(), w, 0, 1, nil, 0) + _, err = Checkpoint(promslog.NewNopLogger(), w, 0, 1, nil, 0) require.Error(t, err) // Walk the wlog dir to make sure there are no tmp folder left behind after the error. diff --git a/tsdb/wlog/live_reader.go b/tsdb/wlog/live_reader.go index 6eaef5f396..a017d362d1 100644 --- a/tsdb/wlog/live_reader.go +++ b/tsdb/wlog/live_reader.go @@ -20,9 +20,8 @@ import ( "fmt" "hash/crc32" "io" + "log/slog" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/golang/snappy" "github.com/klauspost/compress/zstd" "github.com/prometheus/client_golang/prometheus" @@ -51,7 +50,7 @@ func NewLiveReaderMetrics(reg prometheus.Registerer) *LiveReaderMetrics { } // NewLiveReader returns a new live reader. -func NewLiveReader(logger log.Logger, metrics *LiveReaderMetrics, r io.Reader) *LiveReader { +func NewLiveReader(logger *slog.Logger, metrics *LiveReaderMetrics, r io.Reader) *LiveReader { // Calling zstd.NewReader with a nil io.Reader and no options cannot return an error. zstdReader, _ := zstd.NewReader(nil) @@ -73,7 +72,7 @@ func NewLiveReader(logger log.Logger, metrics *LiveReaderMetrics, r io.Reader) * // that are still in the process of being written, and returns records as soon // as they can be read. 
type LiveReader struct { - logger log.Logger + logger *slog.Logger rdr io.Reader err error rec []byte @@ -311,7 +310,7 @@ func (r *LiveReader) readRecord() ([]byte, int, error) { return nil, 0, fmt.Errorf("record would overflow current page: %d > %d", r.readIndex+recordHeaderSize+length, pageSize) } r.metrics.readerCorruptionErrors.WithLabelValues("record_span_page").Inc() - level.Warn(r.logger).Log("msg", "Record spans page boundaries", "start", r.readIndex, "end", recordHeaderSize+length, "pageSize", pageSize) + r.logger.Warn("Record spans page boundaries", "start", r.readIndex, "end", recordHeaderSize+length, "pageSize", pageSize) } if recordHeaderSize+length > pageSize { return nil, 0, fmt.Errorf("record length greater than a single page: %d > %d", recordHeaderSize+length, pageSize) diff --git a/tsdb/wlog/reader_test.go b/tsdb/wlog/reader_test.go index 484eff3664..2ac63cbf15 100644 --- a/tsdb/wlog/reader_test.go +++ b/tsdb/wlog/reader_test.go @@ -29,11 +29,11 @@ import ( "testing" "time" - "github.com/go-kit/log" "github.com/stretchr/testify/require" + "github.com/prometheus/common/promslog" + tsdb_errors "github.com/prometheus/prometheus/tsdb/errors" - "github.com/prometheus/prometheus/util/testutil" ) type reader interface { @@ -53,7 +53,7 @@ var readerConstructors = map[string]func(io.Reader) reader{ return NewReader(r) }, "LiveReader": func(r io.Reader) reader { - lr := NewLiveReader(log.NewNopLogger(), NewLiveReaderMetrics(nil), r) + lr := NewLiveReader(promslog.NewNopLogger(), NewLiveReaderMetrics(nil), r) lr.eofNonErr = true return lr }, @@ -196,7 +196,7 @@ func TestReader(t *testing.T) { } func TestReader_Live(t *testing.T) { - logger := testutil.NewLogger(t) + logger := promslog.NewNopLogger() for i := range testReaderCases { t.Run(strconv.Itoa(i), func(t *testing.T) { @@ -353,7 +353,7 @@ func TestReaderFuzz(t *testing.T) { } func TestReaderFuzz_Live(t *testing.T) { - logger := testutil.NewLogger(t) + logger := promslog.NewNopLogger() for _, compress 
:= range []CompressionType{CompressionNone, CompressionSnappy, CompressionZstd} { t.Run(fmt.Sprintf("compress=%s", compress), func(t *testing.T) { dir := t.TempDir() @@ -441,7 +441,7 @@ func TestReaderFuzz_Live(t *testing.T) { func TestLiveReaderCorrupt_ShortFile(t *testing.T) { // Write a corrupt WAL segment, there is one record of pageSize in length, // but the segment is only half written. - logger := testutil.NewLogger(t) + logger := promslog.NewNopLogger() dir := t.TempDir() w, err := NewSize(nil, nil, dir, pageSize, CompressionNone) @@ -481,7 +481,7 @@ func TestLiveReaderCorrupt_ShortFile(t *testing.T) { func TestLiveReaderCorrupt_RecordTooLongAndShort(t *testing.T) { // Write a corrupt WAL segment, when record len > page size. - logger := testutil.NewLogger(t) + logger := promslog.NewNopLogger() dir := t.TempDir() w, err := NewSize(nil, nil, dir, pageSize*2, CompressionNone) diff --git a/tsdb/wlog/watcher.go b/tsdb/wlog/watcher.go index ac5041e87b..d68ef2accb 100644 --- a/tsdb/wlog/watcher.go +++ b/tsdb/wlog/watcher.go @@ -17,6 +17,7 @@ import ( "errors" "fmt" "io" + "log/slog" "math" "os" "path/filepath" @@ -24,9 +25,8 @@ import ( "strings" "time" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/common/promslog" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/timestamp" @@ -84,7 +84,7 @@ type WatcherMetrics struct { type Watcher struct { name string writer WriteTo - logger log.Logger + logger *slog.Logger walDir string lastCheckpoint string sendExemplars bool @@ -172,9 +172,9 @@ func NewWatcherMetrics(reg prometheus.Registerer) *WatcherMetrics { } // NewWatcher creates a new WAL watcher for a given WriteTo. 
-func NewWatcher(metrics *WatcherMetrics, readerMetrics *LiveReaderMetrics, logger log.Logger, name string, writer WriteTo, dir string, sendExemplars, sendHistograms, sendMetadata bool) *Watcher { +func NewWatcher(metrics *WatcherMetrics, readerMetrics *LiveReaderMetrics, logger *slog.Logger, name string, writer WriteTo, dir string, sendExemplars, sendHistograms, sendMetadata bool) *Watcher { if logger == nil { - logger = log.NewNopLogger() + logger = promslog.NewNopLogger() } return &Watcher{ logger: logger, @@ -222,7 +222,7 @@ func (w *Watcher) setMetrics() { // Start the Watcher. func (w *Watcher) Start() { w.setMetrics() - level.Info(w.logger).Log("msg", "Starting WAL watcher", "queue", w.name) + w.logger.Info("Starting WAL watcher", "queue", w.name) go w.loop() } @@ -241,7 +241,7 @@ func (w *Watcher) Stop() { w.metrics.currentSegment.DeleteLabelValues(w.name) } - level.Info(w.logger).Log("msg", "WAL watcher stopped", "queue", w.name) + w.logger.Info("WAL watcher stopped", "queue", w.name) } func (w *Watcher) loop() { @@ -251,7 +251,7 @@ func (w *Watcher) loop() { for !isClosed(w.quit) { w.SetStartTime(time.Now()) if err := w.Run(); err != nil { - level.Error(w.logger).Log("msg", "error tailing WAL", "err", err) + w.logger.Error("error tailing WAL", "err", err) } select { @@ -274,7 +274,7 @@ func (w *Watcher) Run() error { // Run will be called again if there was a failure to read the WAL. w.sendSamples = false - level.Info(w.logger).Log("msg", "Replaying WAL", "queue", w.name) + w.logger.Info("Replaying WAL", "queue", w.name) // Backfill from the checkpoint first if it exists. 
lastCheckpoint, checkpointIndex, err := LastCheckpoint(w.walDir) @@ -294,13 +294,13 @@ func (w *Watcher) Run() error { return err } - level.Debug(w.logger).Log("msg", "Tailing WAL", "lastCheckpoint", lastCheckpoint, "checkpointIndex", checkpointIndex, "currentSegment", currentSegment, "lastSegment", lastSegment) + w.logger.Debug("Tailing WAL", "lastCheckpoint", lastCheckpoint, "checkpointIndex", checkpointIndex, "currentSegment", currentSegment, "lastSegment", lastSegment) for !isClosed(w.quit) { w.currentSegmentMetric.Set(float64(currentSegment)) // On start, after reading the existing WAL for series records, we have a pointer to what is the latest segment. // On subsequent calls to this function, currentSegment will have been incremented and we should open that segment. - level.Debug(w.logger).Log("msg", "Processing segment", "currentSegment", currentSegment) + w.logger.Debug("Processing segment", "currentSegment", currentSegment) if err := w.watch(currentSegment, currentSegment >= lastSegment); err != nil && !errors.Is(err, ErrIgnorable) { return err } @@ -338,9 +338,9 @@ func (w *Watcher) readAndHandleError(r *LiveReader, segmentNum int, tail bool, s // Ignore all errors reading to end of segment whilst replaying the WAL. 
if !tail { if err != nil && !errors.Is(err, io.EOF) { - level.Warn(w.logger).Log("msg", "Ignoring error reading to end of segment, may have dropped data", "segment", segmentNum, "err", err) + w.logger.Warn("Ignoring error reading to end of segment, may have dropped data", "segment", segmentNum, "err", err) } else if r.Offset() != size { - level.Warn(w.logger).Log("msg", "Expected to have read whole segment, may have dropped data", "segment", segmentNum, "read", r.Offset(), "size", size) + w.logger.Warn("Expected to have read whole segment, may have dropped data", "segment", segmentNum, "read", r.Offset(), "size", size) } return ErrIgnorable } @@ -403,7 +403,7 @@ func (w *Watcher) watch(segmentNum int, tail bool) error { <-gcSem }() if err := w.garbageCollectSeries(segmentNum); err != nil { - level.Warn(w.logger).Log("msg", "Error process checkpoint", "err", err) + w.logger.Warn("Error process checkpoint", "err", err) } }() default: @@ -424,7 +424,7 @@ func (w *Watcher) watch(segmentNum int, tail bool) error { // we haven't read due to a notification in quite some time, try reading anyways case <-readTicker.C: - level.Debug(w.logger).Log("msg", "Watcher is reading the WAL due to timeout, haven't received any write notifications recently", "timeout", readTimeout) + w.logger.Debug("Watcher is reading the WAL due to timeout, haven't received any write notifications recently", "timeout", readTimeout) err := w.readAndHandleError(reader, segmentNum, tail, size) if err != nil { return err @@ -460,11 +460,11 @@ func (w *Watcher) garbageCollectSeries(segmentNum int) error { } if index >= segmentNum { - level.Debug(w.logger).Log("msg", "Current segment is behind the checkpoint, skipping reading of checkpoint", "current", fmt.Sprintf("%08d", segmentNum), "checkpoint", dir) + w.logger.Debug("Current segment is behind the checkpoint, skipping reading of checkpoint", "current", fmt.Sprintf("%08d", segmentNum), "checkpoint", dir) return nil } - level.Debug(w.logger).Log("msg", 
"New checkpoint detected", "new", dir, "currentSegment", segmentNum) + w.logger.Debug("New checkpoint detected", "new", dir, "currentSegment", segmentNum) if err = w.readCheckpoint(dir, (*Watcher).readSegmentForGC); err != nil { return fmt.Errorf("readCheckpoint: %w", err) @@ -519,7 +519,7 @@ func (w *Watcher) readSegment(r *LiveReader, segmentNum int, tail bool) error { if !w.sendSamples { w.sendSamples = true duration := time.Since(w.startTime) - level.Info(w.logger).Log("msg", "Done replaying WAL", "duration", duration) + w.logger.Info("Done replaying WAL", "duration", duration) } samplesToSend = append(samplesToSend, s) } @@ -564,7 +564,7 @@ func (w *Watcher) readSegment(r *LiveReader, segmentNum int, tail bool) error { if !w.sendSamples { w.sendSamples = true duration := time.Since(w.startTime) - level.Info(w.logger).Log("msg", "Done replaying WAL", "duration", duration) + w.logger.Info("Done replaying WAL", "duration", duration) } histogramsToSend = append(histogramsToSend, h) } @@ -592,7 +592,7 @@ func (w *Watcher) readSegment(r *LiveReader, segmentNum int, tail bool) error { if !w.sendSamples { w.sendSamples = true duration := time.Since(w.startTime) - level.Info(w.logger).Log("msg", "Done replaying WAL", "duration", duration) + w.logger.Info("Done replaying WAL", "duration", duration) } floatHistogramsToSend = append(floatHistogramsToSend, fh) } @@ -670,7 +670,7 @@ type segmentReadFn func(w *Watcher, r *LiveReader, segmentNum int, tail bool) er // Read all the series records from a Checkpoint directory. 
func (w *Watcher) readCheckpoint(checkpointDir string, readFn segmentReadFn) error { - level.Debug(w.logger).Log("msg", "Reading checkpoint", "dir", checkpointDir) + w.logger.Debug("Reading checkpoint", "dir", checkpointDir) index, err := checkpointNum(checkpointDir) if err != nil { return fmt.Errorf("checkpointNum: %w", err) @@ -704,7 +704,7 @@ func (w *Watcher) readCheckpoint(checkpointDir string, readFn segmentReadFn) err } } - level.Debug(w.logger).Log("msg", "Read series references from checkpoint", "checkpoint", checkpointDir) + w.logger.Debug("Read series references from checkpoint", "checkpoint", checkpointDir) return nil } diff --git a/tsdb/wlog/watcher_test.go b/tsdb/wlog/watcher_test.go index b8c2380bdf..68c2c5afda 100644 --- a/tsdb/wlog/watcher_test.go +++ b/tsdb/wlog/watcher_test.go @@ -22,8 +22,8 @@ import ( "testing" "time" - "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "golang.org/x/sync/errgroup" @@ -375,7 +375,7 @@ func TestReadToEndWithCheckpoint(t *testing.T) { } } - Checkpoint(log.NewNopLogger(), w, 0, 1, func(x chunks.HeadSeriesRef) bool { return true }, 0) + Checkpoint(promslog.NewNopLogger(), w, 0, 1, func(x chunks.HeadSeriesRef) bool { return true }, 0) w.Truncate(1) // Write more records after checkpointing. 
@@ -466,7 +466,7 @@ func TestReadCheckpoint(t *testing.T) { } _, err = w.NextSegmentSync() require.NoError(t, err) - _, err = Checkpoint(log.NewNopLogger(), w, 30, 31, func(x chunks.HeadSeriesRef) bool { return true }, 0) + _, err = Checkpoint(promslog.NewNopLogger(), w, 30, 31, func(x chunks.HeadSeriesRef) bool { return true }, 0) require.NoError(t, err) require.NoError(t, w.Truncate(32)) @@ -629,7 +629,7 @@ func TestCheckpointSeriesReset(t *testing.T) { return wt.checkNumSeries() == seriesCount }, 10*time.Second, 1*time.Second) - _, err = Checkpoint(log.NewNopLogger(), w, 2, 4, func(x chunks.HeadSeriesRef) bool { return true }, 0) + _, err = Checkpoint(promslog.NewNopLogger(), w, 2, 4, func(x chunks.HeadSeriesRef) bool { return true }, 0) require.NoError(t, err) err = w.Truncate(5) diff --git a/tsdb/wlog/wlog.go b/tsdb/wlog/wlog.go index b14521f358..54c257d61a 100644 --- a/tsdb/wlog/wlog.go +++ b/tsdb/wlog/wlog.go @@ -21,6 +21,7 @@ import ( "fmt" "hash/crc32" "io" + "log/slog" "os" "path/filepath" "slices" @@ -28,11 +29,10 @@ import ( "sync" "time" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/golang/snappy" "github.com/klauspost/compress/zstd" "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/common/promslog" "github.com/prometheus/prometheus/tsdb/fileutil" ) @@ -121,7 +121,7 @@ func (e *CorruptionErr) Unwrap() error { } // OpenWriteSegment opens segment k in dir. The returned segment is ready for new appends. 
-func OpenWriteSegment(logger log.Logger, dir string, k int) (*Segment, error) { +func OpenWriteSegment(logger *slog.Logger, dir string, k int) (*Segment, error) { segName := SegmentName(dir, k) f, err := os.OpenFile(segName, os.O_WRONLY|os.O_APPEND, 0o666) if err != nil { @@ -138,7 +138,7 @@ func OpenWriteSegment(logger log.Logger, dir string, k int) (*Segment, error) { // If it was torn mid-record, a full read (which the caller should do anyway // to ensure integrity) will detect it as a corruption by the end. if d := stat.Size() % pageSize; d != 0 { - level.Warn(logger).Log("msg", "Last page of the wlog is torn, filling it with zeros", "segment", segName) + logger.Warn("Last page of the wlog is torn, filling it with zeros", "segment", segName) if _, err := f.Write(make([]byte, pageSize-d)); err != nil { f.Close() return nil, fmt.Errorf("zero-pad torn page: %w", err) @@ -201,7 +201,7 @@ func ParseCompressionType(compress bool, compressType string) CompressionType { // beyond the most recent segment. type WL struct { dir string - logger log.Logger + logger *slog.Logger segmentSize int mtx sync.RWMutex segment *Segment // Active segment. @@ -286,7 +286,7 @@ func newWLMetrics(w *WL, r prometheus.Registerer) *wlMetrics { }, func() float64 { val, err := w.Size() if err != nil { - level.Error(w.logger).Log("msg", "Failed to calculate size of \"wal\" dir", + w.logger.Error("Failed to calculate size of \"wal\" dir", "err", err.Error()) } return float64(val) @@ -309,13 +309,13 @@ func newWLMetrics(w *WL, r prometheus.Registerer) *wlMetrics { } // New returns a new WAL over the given directory. -func New(logger log.Logger, reg prometheus.Registerer, dir string, compress CompressionType) (*WL, error) { +func New(logger *slog.Logger, reg prometheus.Registerer, dir string, compress CompressionType) (*WL, error) { return NewSize(logger, reg, dir, DefaultSegmentSize, compress) } // NewSize returns a new write log over the given directory. 
// New segments are created with the specified size. -func NewSize(logger log.Logger, reg prometheus.Registerer, dir string, segmentSize int, compress CompressionType) (*WL, error) { +func NewSize(logger *slog.Logger, reg prometheus.Registerer, dir string, segmentSize int, compress CompressionType) (*WL, error) { if segmentSize%pageSize != 0 { return nil, errors.New("invalid segment size") } @@ -323,7 +323,7 @@ func NewSize(logger log.Logger, reg prometheus.Registerer, dir string, segmentSi return nil, fmt.Errorf("create dir: %w", err) } if logger == nil { - logger = log.NewNopLogger() + logger = promslog.NewNopLogger() } var zstdWriter *zstd.Encoder @@ -378,9 +378,9 @@ func NewSize(logger log.Logger, reg prometheus.Registerer, dir string, segmentSi } // Open an existing WAL. -func Open(logger log.Logger, dir string) (*WL, error) { +func Open(logger *slog.Logger, dir string) (*WL, error) { if logger == nil { - logger = log.NewNopLogger() + logger = promslog.NewNopLogger() } zstdWriter, err := zstd.NewWriter(nil) if err != nil { @@ -443,7 +443,7 @@ func (w *WL) Repair(origErr error) error { if cerr.Segment < 0 { return errors.New("corruption error does not specify position") } - level.Warn(w.logger).Log("msg", "Starting corruption repair", + w.logger.Warn("Starting corruption repair", "segment", cerr.Segment, "offset", cerr.Offset) // All segments behind the corruption can no longer be used. @@ -451,7 +451,7 @@ func (w *WL) Repair(origErr error) error { if err != nil { return fmt.Errorf("list segments: %w", err) } - level.Warn(w.logger).Log("msg", "Deleting all segments newer than corrupted segment", "segment", cerr.Segment) + w.logger.Warn("Deleting all segments newer than corrupted segment", "segment", cerr.Segment) for _, s := range segs { if w.segment.i == s.index { @@ -473,7 +473,7 @@ func (w *WL) Repair(origErr error) error { // Regardless of the corruption offset, no record reaches into the previous segment. 
// So we can safely repair the WAL by removing the segment and re-inserting all // its records up to the corruption. - level.Warn(w.logger).Log("msg", "Rewrite corrupted segment", "segment", cerr.Segment) + w.logger.Warn("Rewrite corrupted segment", "segment", cerr.Segment) fn := SegmentName(w.Dir(), cerr.Segment) tmpfn := fn + ".repair" @@ -583,10 +583,10 @@ func (w *WL) nextSegment(async bool) (int, error) { // Don't block further writes by fsyncing the last segment. f := func() { if err := w.fsync(prev); err != nil { - level.Error(w.logger).Log("msg", "sync previous segment", "err", err) + w.logger.Error("sync previous segment", "err", err) } if err := prev.Close(); err != nil { - level.Error(w.logger).Log("msg", "close previous segment", "err", err) + w.logger.Error("close previous segment", "err", err) } } if async { @@ -890,10 +890,10 @@ func (w *WL) Close() (err error) { <-donec if err = w.fsync(w.segment); err != nil { - level.Error(w.logger).Log("msg", "sync previous segment", "err", err) + w.logger.Error("sync previous segment", "err", err) } if err := w.segment.Close(); err != nil { - level.Error(w.logger).Log("msg", "close previous segment", "err", err) + w.logger.Error("close previous segment", "err", err) } w.metrics.Unregister() diff --git a/tsdb/wlog/wlog_test.go b/tsdb/wlog/wlog_test.go index 165d2758f0..d195aaee2f 100644 --- a/tsdb/wlog/wlog_test.go +++ b/tsdb/wlog/wlog_test.go @@ -23,14 +23,13 @@ import ( "path/filepath" "testing" - "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" client_testutil "github.com/prometheus/client_golang/prometheus/testutil" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "go.uber.org/goleak" "github.com/prometheus/prometheus/tsdb/fileutil" - "github.com/prometheus/prometheus/util/testutil" ) func TestMain(m *testing.M) { @@ -215,7 +214,7 @@ func TestCorruptAndCarryOn(t *testing.T) { dir := t.TempDir() var ( - logger = testutil.NewLogger(t) + logger = 
promslog.NewNopLogger() segmentSize = pageSize * 3 recordSize = (pageSize / 3) - recordHeaderSize ) @@ -568,7 +567,7 @@ func TestUnregisterMetrics(t *testing.T) { reg := prometheus.NewRegistry() for i := 0; i < 2; i++ { - wl, err := New(log.NewNopLogger(), reg, t.TempDir(), CompressionNone) + wl, err := New(promslog.NewNopLogger(), reg, t.TempDir(), CompressionNone) require.NoError(t, err) require.NoError(t, wl.Close()) } diff --git a/util/logging/dedupe.go b/util/logging/dedupe.go index d490a6afdf..37b345b395 100644 --- a/util/logging/dedupe.go +++ b/util/logging/dedupe.go @@ -14,12 +14,10 @@ package logging import ( - "bytes" + "context" + "log/slog" "sync" "time" - - "github.com/go-kit/log" - "github.com/go-logfmt/logfmt" ) const ( @@ -28,22 +26,9 @@ const ( maxEntries = 1024 ) -type logfmtEncoder struct { - *logfmt.Encoder - buf bytes.Buffer -} - -var logfmtEncoderPool = sync.Pool{ - New: func() interface{} { - var enc logfmtEncoder - enc.Encoder = logfmt.NewEncoder(&enc.buf) - return &enc - }, -} - -// Deduper implement log.Logger, dedupes log lines. +// Deduper implements *slog.Handler, dedupes log lines based on a time duration. type Deduper struct { - next log.Logger + next *slog.Logger repeat time.Duration quit chan struct{} mtx sync.RWMutex @@ -51,7 +36,7 @@ type Deduper struct { } // Dedupe log lines to next, only repeating every repeat duration. -func Dedupe(next log.Logger, repeat time.Duration) *Deduper { +func Dedupe(next *slog.Logger, repeat time.Duration) *Deduper { d := &Deduper{ next: next, repeat: repeat, @@ -62,6 +47,83 @@ func Dedupe(next log.Logger, repeat time.Duration) *Deduper { return d } +// Enabled returns true if the Deduper's internal slog.Logger is enabled at the +// provided context and log level, and returns false otherwise. It implements +// slog.Handler. 
+func (d *Deduper) Enabled(ctx context.Context, level slog.Level) bool { + d.mtx.RLock() + enabled := d.next.Enabled(ctx, level) + d.mtx.RUnlock() + + return enabled +} + +// Handle uses the provided context and slog.Record to deduplicate messages +// every 1m. Log records received within the interval are not acted on, and +// thus dropped. Log records that pass deduplication and need action invoke the +// Handle() method on the Deduper's internal slog.Logger's handler, effectively +// chaining log calls to the internal slog.Logger. +func (d *Deduper) Handle(ctx context.Context, r slog.Record) error { + line := r.Message + d.mtx.RLock() + last, ok := d.seen[line] + d.mtx.RUnlock() + + if ok && time.Since(last) < d.repeat { + return nil + } + + d.mtx.Lock() + if len(d.seen) < maxEntries { + d.seen[line] = time.Now() + } + d.mtx.Unlock() + + return d.next.Handler().Handle(ctx, r.Clone()) +} + +// WithAttrs adds the provided attributes to the Deduper's internal +// slog.Logger. It implements slog.Handler. +func (d *Deduper) WithAttrs(attrs []slog.Attr) slog.Handler { + d.mtx.Lock() + d.next = slog.New(d.next.Handler().WithAttrs(attrs)) + d.mtx.Unlock() + return d +} + +// WithGroup adds the provided group name to the Deduper's internal +// slog.Logger. It implements slog.Handler. +func (d *Deduper) WithGroup(name string) slog.Handler { + d.mtx.Lock() + d.next = slog.New(d.next.Handler().WithGroup(name)) + d.mtx.Unlock() + return d +} + +// Info logs the provided message and key-value arguments using the Deduper's +// internal slog.Logger. It is simply a wrapper around slog.Logger.Info(). +func (d *Deduper) Info(msg string, args ...any) { + d.next.Info(msg, args...) +} + +// Warn logs the provided message and key-value arguments using the Deduper's +// internal slog.Logger. It is simply a wrapper around slog.Logger.Warn(). +func (d *Deduper) Warn(msg string, args ...any) { + d.next.Warn(msg, args...) 
+} + +// Error logs the provided message and key-value arguments using the Deduper's +// internal slog.Logger. It is simply a wrapper around slog.Logger.Error(). +func (d *Deduper) Error(msg string, args ...any) { + d.next.Error(msg, args...) +} + +// Debug logs the provided message and key-value arguments using the Deduper's +// internal slog.Logger. It is simply a wrapper around slog.Logger.Debug(). +func (d *Deduper) Debug(msg string, args ...any) { + d.next.Debug(msg, args...) +} + // Stop the Deduper. func (d *Deduper) Stop() { close(d.quit) @@ -87,44 +149,3 @@ func (d *Deduper) run() { } } } - -// Log implements log.Logger. -func (d *Deduper) Log(keyvals ...interface{}) error { - line, err := encode(keyvals...) - if err != nil { - return err - } - - d.mtx.RLock() - last, ok := d.seen[line] - d.mtx.RUnlock() - - if ok && time.Since(last) < d.repeat { - return nil - } - - d.mtx.Lock() - if len(d.seen) < maxEntries { - d.seen[line] = time.Now() - } - d.mtx.Unlock() - - return d.next.Log(keyvals...) 
-} - -func encode(keyvals ...interface{}) (string, error) { - enc := logfmtEncoderPool.Get().(*logfmtEncoder) - enc.buf.Reset() - defer logfmtEncoderPool.Put(enc) - - if err := enc.EncodeKeyvals(keyvals...); err != nil { - return "", err - } - - // Add newline to the end of the buffer - if err := enc.EndRecord(); err != nil { - return "", err - } - - return enc.buf.String(), nil -} diff --git a/util/logging/dedupe_test.go b/util/logging/dedupe_test.go index e05d6454c5..5baa90b038 100644 --- a/util/logging/dedupe_test.go +++ b/util/logging/dedupe_test.go @@ -14,34 +14,45 @@ package logging import ( + "bytes" + "log/slog" + "strings" "testing" "time" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" ) -type counter int - -func (c *counter) Log(...interface{}) error { - (*c)++ - return nil -} - func TestDedupe(t *testing.T) { - var c counter - d := Dedupe(&c, 100*time.Millisecond) + var buf bytes.Buffer + d := Dedupe(promslog.New(&promslog.Config{Writer: &buf}), 100*time.Millisecond) + dlog := slog.New(d) defer d.Stop() // Log 10 times quickly, ensure they are deduped. for i := 0; i < 10; i++ { - err := d.Log("msg", "hello") - require.NoError(t, err) + dlog.Info("test", "hello", "world") } - require.Equal(t, 1, int(c)) + + // Trim empty lines + lines := []string{} + for _, line := range strings.Split(buf.String(), "\n") { + if line != "" { + lines = append(lines, line) + } + } + require.Len(t, lines, 1) // Wait, then log again, make sure it is logged. 
time.Sleep(200 * time.Millisecond) - err := d.Log("msg", "hello") - require.NoError(t, err) - require.Equal(t, 2, int(c)) + dlog.Info("test", "hello", "world") + // Trim empty lines + lines = []string{} + for _, line := range strings.Split(buf.String(), "\n") { + if line != "" { + lines = append(lines, line) + } + } + require.Len(t, lines, 2) } diff --git a/util/logging/file.go b/util/logging/file.go index 2afa828547..f20927beda 100644 --- a/util/logging/file.go +++ b/util/logging/file.go @@ -15,20 +15,15 @@ package logging import ( "fmt" + "log/slog" "os" - "time" - "github.com/go-kit/log" + "github.com/prometheus/common/promslog" ) -var timestampFormat = log.TimestampFormat( - func() time.Time { return time.Now().UTC() }, - "2006-01-02T15:04:05.000Z07:00", -) - -// JSONFileLogger represents a logger that writes JSON to a file. +// JSONFileLogger represents a logger that writes JSON to a file. It implements the promql.QueryLogger interface. type JSONFileLogger struct { - logger log.Logger + logger *slog.Logger file *os.File } @@ -40,21 +35,48 @@ func NewJSONFileLogger(s string) (*JSONFileLogger, error) { f, err := os.OpenFile(s, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0o666) if err != nil { - return nil, fmt.Errorf("can't create json logger: %w", err) + return nil, fmt.Errorf("can't create json log file: %w", err) } + jsonFmt := &promslog.AllowedFormat{} + _ = jsonFmt.Set("json") return &JSONFileLogger{ - logger: log.With(log.NewJSONLogger(f), "ts", timestampFormat), + logger: promslog.New(&promslog.Config{Format: jsonFmt, Writer: f}), file: f, }, nil } -// Close closes the underlying file. +// Close closes the underlying file. It implements the promql.QueryLogger interface. func (l *JSONFileLogger) Close() error { return l.file.Close() } -// Log calls the Log function of the underlying logger. -func (l *JSONFileLogger) Log(i ...interface{}) error { - return l.logger.Log(i...) 
+// With calls the `With()` method on the underlying `log/slog.Logger` with the +// provided msg and args. It implements the promql.QueryLogger interface. +func (l *JSONFileLogger) With(args ...any) { + l.logger = l.logger.With(args...) +} + +// Info calls the `Info()` method on the underlying `log/slog.Logger` with the +// provided msg and args. It implements the promql.QueryLogger interface. +func (l *JSONFileLogger) Info(msg string, args ...any) { + l.logger.Info(msg, args...) +} + +// Error calls the `Error()` method on the underlying `log/slog.Logger` with the +// provided msg and args. It implements the promql.QueryLogger interface. +func (l *JSONFileLogger) Error(msg string, args ...any) { + l.logger.Error(msg, args...) +} + +// Debug calls the `Debug()` method on the underlying `log/slog.Logger` with the +// provided msg and args. It implements the promql.QueryLogger interface. +func (l *JSONFileLogger) Debug(msg string, args ...any) { + l.logger.Debug(msg, args...) +} + +// Warn calls the `Warn()` method on the underlying `log/slog.Logger` with the +// provided msg and args. It implements the promql.QueryLogger interface. +func (l *JSONFileLogger) Warn(msg string, args ...any) { + l.logger.Warn(msg, args...) 
} diff --git a/util/logging/file_test.go b/util/logging/file_test.go index 0e760a4848..8ab4754339 100644 --- a/util/logging/file_test.go +++ b/util/logging/file_test.go @@ -34,12 +34,13 @@ func TestJSONFileLogger_basic(t *testing.T) { require.NoError(t, err) require.NotNil(t, l, "logger can't be nil") - err = l.Log("test", "yes") + l.Info("test", "hello", "world") require.NoError(t, err) r := make([]byte, 1024) _, err = f.Read(r) require.NoError(t, err) - result, err := regexp.Match(`^{"test":"yes","ts":"[^"]+"}\n`, r) + + result, err := regexp.Match(`^{"time":"[^"]+","level":"INFO","source":\{.+\},"msg":"test","hello":"world"}\n`, r) require.NoError(t, err) require.True(t, result, "unexpected content: %s", r) @@ -63,14 +64,14 @@ func TestJSONFileLogger_parallel(t *testing.T) { require.NoError(t, err) require.NotNil(t, l, "logger can't be nil") - err = l.Log("test", "yes") + l.Info("test", "hello", "world") require.NoError(t, err) l2, err := NewJSONFileLogger(f.Name()) require.NoError(t, err) require.NotNil(t, l, "logger can't be nil") - err = l2.Log("test", "yes") + l2.Info("test", "hello", "world") require.NoError(t, err) err = l.Close() diff --git a/util/logging/ratelimit.go b/util/logging/ratelimit.go deleted file mode 100644 index 32d1e249e6..0000000000 --- a/util/logging/ratelimit.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package logging - -import ( - "github.com/go-kit/log" - "golang.org/x/time/rate" -) - -type ratelimiter struct { - limiter *rate.Limiter - next log.Logger -} - -// RateLimit write to a logger. -func RateLimit(next log.Logger, limit rate.Limit) log.Logger { - return &ratelimiter{ - limiter: rate.NewLimiter(limit, int(limit)), - next: next, - } -} - -func (r *ratelimiter) Log(keyvals ...interface{}) error { - if r.limiter.Allow() { - return r.next.Log(keyvals...) - } - return nil -} diff --git a/util/testutil/logging.go b/util/testutil/logging.go deleted file mode 100644 index db096ea234..0000000000 --- a/util/testutil/logging.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package testutil - -import ( - "testing" - - "github.com/go-kit/log" -) - -type logger struct { - t *testing.T -} - -// NewLogger returns a gokit compatible Logger which calls t.Log. -func NewLogger(t *testing.T) log.Logger { - return logger{t: t} -} - -// Log implements log.Logger. -func (t logger) Log(keyvals ...interface{}) error { - t.t.Log(keyvals...) 
- return nil -} diff --git a/util/treecache/treecache.go b/util/treecache/treecache.go index bbbaaf3d6e..4d4b6f544c 100644 --- a/util/treecache/treecache.go +++ b/util/treecache/treecache.go @@ -17,12 +17,11 @@ import ( "bytes" "errors" "fmt" + "log/slog" "strings" "sync" "time" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/go-zookeeper/zk" "github.com/prometheus/client_golang/prometheus" ) @@ -47,19 +46,19 @@ func init() { prometheus.MustRegister(numWatchers) } -// ZookeeperLogger wraps a log.Logger into a zk.Logger. +// ZookeeperLogger wraps a *slog.Logger into a zk.Logger. type ZookeeperLogger struct { - logger log.Logger + logger *slog.Logger } // NewZookeeperLogger is a constructor for ZookeeperLogger. -func NewZookeeperLogger(logger log.Logger) ZookeeperLogger { +func NewZookeeperLogger(logger *slog.Logger) ZookeeperLogger { return ZookeeperLogger{logger: logger} } // Printf implements zk.Logger. func (zl ZookeeperLogger) Printf(s string, i ...interface{}) { - level.Info(zl.logger).Log("msg", fmt.Sprintf(s, i...)) + zl.logger.Info(s, i...) } // A ZookeeperTreeCache keeps data from all children of a Zookeeper path @@ -72,7 +71,7 @@ type ZookeeperTreeCache struct { wg *sync.WaitGroup head *zookeeperTreeCacheNode - logger log.Logger + logger *slog.Logger } // A ZookeeperTreeCacheEvent models a Zookeeper event for a path. @@ -90,7 +89,7 @@ type zookeeperTreeCacheNode struct { } // NewZookeeperTreeCache creates a new ZookeeperTreeCache for a given path. 
-func NewZookeeperTreeCache(conn *zk.Conn, path string, events chan ZookeeperTreeCacheEvent, logger log.Logger) *ZookeeperTreeCache { +func NewZookeeperTreeCache(conn *zk.Conn, path string, events chan ZookeeperTreeCacheEvent, logger *slog.Logger) *ZookeeperTreeCache { tc := &ZookeeperTreeCache{ conn: conn, prefix: path, @@ -144,20 +143,20 @@ func (tc *ZookeeperTreeCache) loop(path string) { err := tc.recursiveNodeUpdate(path, tc.head) if err != nil { - level.Error(tc.logger).Log("msg", "Error during initial read of Zookeeper", "err", err) + tc.logger.Error("Error during initial read of Zookeeper", "err", err) failure() } for { select { case ev := <-tc.head.events: - level.Debug(tc.logger).Log("msg", "Received Zookeeper event", "event", ev) + tc.logger.Debug("Received Zookeeper event", "event", ev) if failureMode { continue } if ev.Type == zk.EventNotWatching { - level.Info(tc.logger).Log("msg", "Lost connection to Zookeeper.") + tc.logger.Info("Lost connection to Zookeeper.") failure() } else { path := strings.TrimPrefix(ev.Path, tc.prefix) @@ -178,15 +177,15 @@ func (tc *ZookeeperTreeCache) loop(path string) { switch err := tc.recursiveNodeUpdate(ev.Path, node); { case err != nil: - level.Error(tc.logger).Log("msg", "Error during processing of Zookeeper event", "err", err) + tc.logger.Error("Error during processing of Zookeeper event", "err", err) failure() case tc.head.data == nil: - level.Error(tc.logger).Log("msg", "Error during processing of Zookeeper event", "err", "path no longer exists", "path", tc.prefix) + tc.logger.Error("Error during processing of Zookeeper event", "err", "path no longer exists", "path", tc.prefix) failure() } } case <-retryChan: - level.Info(tc.logger).Log("msg", "Attempting to resync state with Zookeeper") + tc.logger.Info("Attempting to resync state with Zookeeper") previousState := &zookeeperTreeCacheNode{ children: tc.head.children, } @@ -194,13 +193,13 @@ func (tc *ZookeeperTreeCache) loop(path string) { tc.head.children = 
make(map[string]*zookeeperTreeCacheNode) if err := tc.recursiveNodeUpdate(tc.prefix, tc.head); err != nil { - level.Error(tc.logger).Log("msg", "Error during Zookeeper resync", "err", err) + tc.logger.Error("Error during Zookeeper resync", "err", err) // Revert to our previous state. tc.head.children = previousState.children failure() } else { tc.resyncState(tc.prefix, tc.head, previousState) - level.Info(tc.logger).Log("msg", "Zookeeper resync successful") + tc.logger.Info("Zookeeper resync successful") failureMode = false } case <-tc.stop: diff --git a/web/api/v1/api.go b/web/api/v1/api.go index 95ab7ea2ac..9fb01f5767 100644 --- a/web/api/v1/api.go +++ b/web/api/v1/api.go @@ -18,6 +18,7 @@ import ( "encoding/json" "errors" "fmt" + "log/slog" "math" "math/rand" "net" @@ -31,8 +32,6 @@ import ( "strings" "time" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/grafana/regexp" jsoniter "github.com/json-iterator/go" "github.com/munnerz/goautoneg" @@ -207,7 +206,7 @@ type API struct { db TSDBAdminStats dbDir string enableAdmin bool - logger log.Logger + logger *slog.Logger CORSOrigin *regexp.Regexp buildInfo *PrometheusVersion runtimeInfo func() (RuntimeInfo, error) @@ -240,7 +239,7 @@ func NewAPI( db TSDBAdminStats, dbDir string, enableAdmin bool, - logger log.Logger, + logger *slog.Logger, rr func(context.Context) RulesRetriever, remoteReadSampleLimit int, remoteReadConcurrencyLimit int, @@ -1863,7 +1862,7 @@ func (api *API) respond(w http.ResponseWriter, req *http.Request, data interface b, err := codec.Encode(resp) if err != nil { - level.Error(api.logger).Log("msg", "error marshaling response", "url", req.URL, "err", err) + api.logger.Error("error marshaling response", "url", req.URL, "err", err) http.Error(w, err.Error(), http.StatusInternalServerError) return } @@ -1871,7 +1870,7 @@ func (api *API) respond(w http.ResponseWriter, req *http.Request, data interface w.Header().Set("Content-Type", codec.ContentType().String()) 
w.WriteHeader(http.StatusOK) if n, err := w.Write(b); err != nil { - level.Error(api.logger).Log("msg", "error writing response", "url", req.URL, "bytesWritten", n, "err", err) + api.logger.Error("error writing response", "url", req.URL, "bytesWritten", n, "err", err) } } @@ -1901,7 +1900,7 @@ func (api *API) respondError(w http.ResponseWriter, apiErr *apiError, data inter Data: data, }) if err != nil { - level.Error(api.logger).Log("msg", "error marshaling json response", "err", err) + api.logger.Error("error marshaling json response", "err", err) http.Error(w, err.Error(), http.StatusInternalServerError) return } @@ -1929,7 +1928,7 @@ func (api *API) respondError(w http.ResponseWriter, apiErr *apiError, data inter w.Header().Set("Content-Type", "application/json") w.WriteHeader(code) if n, err := w.Write(b); err != nil { - level.Error(api.logger).Log("msg", "error writing response", "bytesWritten", n, "err", err) + api.logger.Error("error writing response", "bytesWritten", n, "err", err) } } diff --git a/web/api/v1/api_test.go b/web/api/v1/api_test.go index 334c41ce86..7ac2fe5693 100644 --- a/web/api/v1/api_test.go +++ b/web/api/v1/api_test.go @@ -34,12 +34,11 @@ import ( "github.com/prometheus/prometheus/util/stats" "github.com/prometheus/prometheus/util/testutil" - "github.com/go-kit/log" jsoniter "github.com/json-iterator/go" "github.com/prometheus/client_golang/prometheus" config_util "github.com/prometheus/common/config" "github.com/prometheus/common/model" - "github.com/prometheus/common/promlog" + "github.com/prometheus/common/promslog" "github.com/prometheus/common/route" "github.com/stretchr/testify/require" @@ -238,7 +237,7 @@ func (m *rulesRetrieverMock) CreateAlertingRules() { labels.Labels{}, "", true, - log.NewNopLogger(), + promslog.NewNopLogger(), ) rule2 := rules.NewAlertingRule( "test_metric4", @@ -250,7 +249,7 @@ func (m *rulesRetrieverMock) CreateAlertingRules() { labels.Labels{}, "", true, - log.NewNopLogger(), + promslog.NewNopLogger(), ) 
rule3 := rules.NewAlertingRule( "test_metric5", @@ -262,7 +261,7 @@ func (m *rulesRetrieverMock) CreateAlertingRules() { labels.FromStrings("name", "tm5"), "", false, - log.NewNopLogger(), + promslog.NewNopLogger(), ) rule4 := rules.NewAlertingRule( "test_metric6", @@ -274,7 +273,7 @@ func (m *rulesRetrieverMock) CreateAlertingRules() { labels.Labels{}, "", true, - log.NewNopLogger(), + promslog.NewNopLogger(), ) rule5 := rules.NewAlertingRule( "test_metric7", @@ -286,7 +285,7 @@ func (m *rulesRetrieverMock) CreateAlertingRules() { labels.Labels{}, "", true, - log.NewNopLogger(), + promslog.NewNopLogger(), ) var r []*rules.AlertingRule r = append(r, rule1) @@ -314,7 +313,7 @@ func (m *rulesRetrieverMock) CreateRuleGroups() { QueryFunc: rules.EngineQueryFunc(engine, storage), Appendable: storage, Context: context.Background(), - Logger: log.NewNopLogger(), + Logger: promslog.NewNopLogger(), NotifyFunc: func(ctx context.Context, expr string, alerts ...*rules.Alert) {}, } @@ -471,20 +470,20 @@ func TestEndpoints(t *testing.T) { u, err := url.Parse(server.URL) require.NoError(t, err) - al := promlog.AllowedLevel{} + al := promslog.AllowedLevel{} require.NoError(t, al.Set("debug")) - af := promlog.AllowedFormat{} + af := promslog.AllowedFormat{} require.NoError(t, af.Set("logfmt")) - promlogConfig := promlog.Config{ + promslogConfig := promslog.Config{ Level: &al, Format: &af, } dbDir := t.TempDir() - remote := remote.NewStorage(promlog.New(&promlogConfig), prometheus.DefaultRegisterer, func() (int64, error) { + remote := remote.NewStorage(promslog.New(&promslogConfig), prometheus.DefaultRegisterer, func() (int64, error) { return 0, nil }, dbDir, 1*time.Second, nil, false) @@ -3530,7 +3529,7 @@ func TestAdminEndpoints(t *testing.T) { func TestRespondSuccess(t *testing.T) { api := API{ - logger: log.NewNopLogger(), + logger: promslog.NewNopLogger(), } api.ClearCodecs() @@ -3622,7 +3621,7 @@ func TestRespondSuccess(t *testing.T) { func 
TestRespondSuccess_DefaultCodecCannotEncodeResponse(t *testing.T) { api := API{ - logger: log.NewNopLogger(), + logger: promslog.NewNopLogger(), } api.ClearCodecs() diff --git a/web/api/v1/errors_test.go b/web/api/v1/errors_test.go index db16b9fb3b..f5e75615ec 100644 --- a/web/api/v1/errors_test.go +++ b/web/api/v1/errors_test.go @@ -23,9 +23,9 @@ import ( "testing" "time" - "github.com/go-kit/log" "github.com/grafana/regexp" "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/common/promslog" "github.com/prometheus/common/route" "github.com/stretchr/testify/require" @@ -105,7 +105,7 @@ func createPrometheusAPI(t *testing.T, q storage.SampleAndChunkQueryable) *route t.Helper() engine := promqltest.NewTestEngineWithOpts(t, promql.EngineOpts{ - Logger: log.NewNopLogger(), + Logger: promslog.NewNopLogger(), Reg: nil, ActiveQueryTracker: nil, MaxSamples: 100, @@ -127,7 +127,7 @@ func createPrometheusAPI(t *testing.T, q storage.SampleAndChunkQueryable) *route nil, // Only needed for admin APIs. "", // This is for snapshots, which is disabled when admin APIs are disabled. Hence empty. false, // Disable admin APIs. - log.NewNopLogger(), + promslog.NewNopLogger(), func(context.Context) RulesRetriever { return &DummyRulesRetriever{} }, 0, 0, 0, // Remote read samples and concurrency limit. false, // Not an agent. 
diff --git a/web/federate.go b/web/federate.go index 8176eba365..8e20a60f0f 100644 --- a/web/federate.go +++ b/web/federate.go @@ -21,7 +21,6 @@ import ( "sort" "strings" - "github.com/go-kit/log/level" "github.com/gogo/protobuf/proto" "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" @@ -157,7 +156,7 @@ Loop: }) } if ws := set.Warnings(); len(ws) > 0 { - level.Debug(h.logger).Log("msg", "Federation select returned warnings", "warnings", ws) + h.logger.Debug("Federation select returned warnings", "warnings", ws) federationWarnings.Add(float64(len(ws))) } if set.Err() != nil { @@ -253,11 +252,11 @@ Loop: }) if err != nil { federationErrors.Inc() - level.Error(h.logger).Log("msg", "federation failed", "err", err) + h.logger.Error("federation failed", "err", err) return } if !nameSeen { - level.Warn(h.logger).Log("msg", "Ignoring nameless metric during federation", "metric", s.Metric) + h.logger.Warn("Ignoring nameless metric during federation", "metric", s.Metric) continue } // Attach global labels if they do not exist yet. 
@@ -314,7 +313,7 @@ Loop: if protMetricFam != nil { if err := enc.Encode(protMetricFam); err != nil { federationErrors.Inc() - level.Error(h.logger).Log("msg", "federation failed", "err", err) + h.logger.Error("federation failed", "err", err) } } } diff --git a/web/web.go b/web/web.go index 5e1d3d230b..21c41c55eb 100644 --- a/web/web.go +++ b/web/web.go @@ -19,7 +19,7 @@ import ( "encoding/json" "fmt" "io" - stdlog "log" + "log/slog" "math" "net" "net/http" @@ -36,14 +36,13 @@ import ( "time" "github.com/alecthomas/units" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/grafana/regexp" "github.com/mwitkow/go-conntrack" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" io_prometheus_client "github.com/prometheus/client_model/go" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/prometheus/common/route" "github.com/prometheus/common/server" toolkit_web "github.com/prometheus/exporter-toolkit/web" @@ -115,14 +114,14 @@ const ( // will re-raise the error which will then be handled by the net/http package. // It is needed because the go-kit log package doesn't manage properly the // panics from net/http (see https://github.com/go-kit/kit/issues/233). -func withStackTracer(h http.Handler, l log.Logger) http.Handler { +func withStackTracer(h http.Handler, l *slog.Logger) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { defer func() { if err := recover(); err != nil { const size = 64 << 10 buf := make([]byte, size) buf = buf[:runtime.Stack(buf, false)] - level.Error(l).Log("msg", "panic while serving request", "client", r.RemoteAddr, "url", r.URL, "err", err, "stack", buf) + l.Error("panic while serving request", "client", r.RemoteAddr, "url", r.URL, "err", err, "stack", buf) panic(err) } }() @@ -208,7 +207,7 @@ type LocalStorage interface { // Handler serves various HTTP endpoints of the Prometheus server. 
type Handler struct { - logger log.Logger + logger *slog.Logger gatherer prometheus.Gatherer metrics *metrics @@ -300,9 +299,9 @@ type Options struct { } // New initializes a new web Handler. -func New(logger log.Logger, o *Options) *Handler { +func New(logger *slog.Logger, o *Options) *Handler { if logger == nil { - logger = log.NewNopLogger() + logger = promslog.NewNopLogger() } m := newMetrics(o.Registerer) @@ -648,7 +647,7 @@ func (h *Handler) Listeners() ([]net.Listener, error) { // Listener creates the TCP listener for web requests. func (h *Handler) Listener(address string, sem chan struct{}) (net.Listener, error) { - level.Info(h.logger).Log("msg", "Start listening for connections", "address", address) + h.logger.Info("Start listening for connections", "address", address) listener, err := net.Listen("tcp", address) if err != nil { @@ -680,7 +679,7 @@ func (h *Handler) Run(ctx context.Context, listeners []net.Listener, webConfig s apiPath := "/api" if h.options.RoutePrefix != "/" { apiPath = h.options.RoutePrefix + apiPath - level.Info(h.logger).Log("msg", "Router prefix", "prefix", h.options.RoutePrefix) + h.logger.Info("Router prefix", "prefix", h.options.RoutePrefix) } av1 := route.New(). WithInstrumentation(h.metrics.instrumentHandlerWithPrefix("/api/v1")). 
@@ -689,7 +688,7 @@ func (h *Handler) Run(ctx context.Context, listeners []net.Listener, webConfig s mux.Handle(apiPath+"/v1/", http.StripPrefix(apiPath+"/v1", av1)) - errlog := stdlog.New(log.NewStdlibAdapter(level.Error(h.logger)), "", 0) + errlog := slog.NewLogLogger(h.logger.Handler(), slog.LevelError) spanNameFormatter := otelhttp.WithSpanNameFormatter(func(_ string, r *http.Request) string { return fmt.Sprintf("%s %s", r.Method, r.URL.Path) From f3358f3c31587cd15ebe486f2968897dee1eb62a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 7 Oct 2024 20:26:35 +0000 Subject: [PATCH 133/137] Bump github.com/digitalocean/godo from 1.122.0 to 1.126.0 Bumps [github.com/digitalocean/godo](https://github.com/digitalocean/godo) from 1.122.0 to 1.126.0. - [Release notes](https://github.com/digitalocean/godo/releases) - [Changelog](https://github.com/digitalocean/godo/blob/main/CHANGELOG.md) - [Commits](https://github.com/digitalocean/godo/compare/v1.122.0...v1.126.0) --- updated-dependencies: - dependency-name: github.com/digitalocean/godo dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index f41b063242..34b1cb0cce 100644 --- a/go.mod +++ b/go.mod @@ -17,7 +17,7 @@ require ( github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 github.com/cespare/xxhash/v2 v2.3.0 github.com/dennwc/varint v1.0.0 - github.com/digitalocean/godo v1.122.0 + github.com/digitalocean/godo v1.126.0 github.com/docker/docker v27.2.0+incompatible github.com/edsrzf/mmap-go v1.1.0 github.com/envoyproxy/go-control-plane v0.13.0 diff --git a/go.sum b/go.sum index 408419ad8c..17384d18df 100644 --- a/go.sum +++ b/go.sum @@ -121,8 +121,8 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE= github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA= -github.com/digitalocean/godo v1.122.0 h1:ziytLQi8QKtDp2K1A+YrYl2dWLHLh2uaMzWvcz9HkKg= -github.com/digitalocean/godo v1.122.0/go.mod h1:WQVH83OHUy6gC4gXpEVQKtxTd4L5oCp+5OialidkPLY= +github.com/digitalocean/godo v1.126.0 h1:+Znh7VMQj/E8ArbjWnc7OKGjWfzC+I8OCSRp7r1MdD8= +github.com/digitalocean/godo v1.126.0/go.mod h1:PU8JB6I1XYkQIdHFop8lLAY9ojp6M0XcU0TWaQSxbrc= github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= From 5dc59acada6b039452d1ddab3940be20a42d7846 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 7 Oct 2024 20:27:02 +0000 Subject: [PATCH 134/137] Bump github.com/klauspost/compress from 1.17.9 to 1.17.10 Bumps 
[github.com/klauspost/compress](https://github.com/klauspost/compress) from 1.17.9 to 1.17.10. - [Release notes](https://github.com/klauspost/compress/releases) - [Changelog](https://github.com/klauspost/compress/blob/master/.goreleaser.yml) - [Commits](https://github.com/klauspost/compress/compare/v1.17.9...v1.17.10) --- updated-dependencies: - dependency-name: github.com/klauspost/compress dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index f41b063242..e47c7cfbc7 100644 --- a/go.mod +++ b/go.mod @@ -39,7 +39,7 @@ require ( github.com/hetznercloud/hcloud-go/v2 v2.13.1 github.com/ionos-cloud/sdk-go/v6 v6.2.1 github.com/json-iterator/go v1.1.12 - github.com/klauspost/compress v1.17.9 + github.com/klauspost/compress v1.17.10 github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b github.com/linode/linodego v1.41.0 github.com/miekg/dns v1.1.62 diff --git a/go.sum b/go.sum index 408419ad8c..1124313abb 100644 --- a/go.sum +++ b/go.sum @@ -383,8 +383,8 @@ github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4d github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= -github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= +github.com/klauspost/compress v1.17.10 h1:oXAz+Vh0PMUvJczoi+flxpnBEPxoER1IaAnU/NMPtT0= +github.com/klauspost/compress v1.17.10/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b h1:udzkj9S/zlT5X367kqJis0QP7YMxobob6zhzq6Yre00= github.com/kolo/xmlrpc 
v0.0.0-20220921171641-a4b6fa1dd06b/go.mod h1:pcaDhQK0/NJZEvtCO0qQPPropqV0sJOJ6YW7X+9kRwM= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= From 8406aa9c9cc6ed082739420243eaeb7d9577cc0f Mon Sep 17 00:00:00 2001 From: Julien Date: Tue, 8 Oct 2024 11:23:01 +0200 Subject: [PATCH 135/137] Add a note for pre-built assets Signed-off-by: Julien --- web/ui/README.md | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/web/ui/README.md b/web/ui/README.md index 38087755e5..49ec27d8b4 100644 --- a/web/ui/README.md +++ b/web/ui/README.md @@ -12,6 +12,27 @@ in `.promu.yml`, and then `make build` (or build Prometheus using This will serve all files from your local filesystem. This is for development purposes only. +### Using Prebuilt UI Assets + +If you are only working on the go backend, for faster builds, you can use +prebuilt web UI assets available with each Prometheus release +(`prometheus-web-ui-.tar.gz`). This allows you to skip building the UI +from source. + +1. Download and extract the prebuilt UI tarball: + ```bash + tar -xvf prometheus-web-ui-.tar.gz -C web/ui + ``` + +2. Build Prometheus using the prebuilt assets by passing the following parameter + to `make`: + ```bash + make PREBUILT_ASSETS_STATIC_DIR=web/ui/static build + ``` + +This will include the prebuilt UI files directly in the Prometheus binary, +avoiding the need to install npm or rebuild the frontend from source. + ## React-app ### Introduction From f0a9f62ce849476fd4f6d7d69e97ec017e9a9f29 Mon Sep 17 00:00:00 2001 From: Viet Hung Nguyen Date: Thu, 3 Oct 2024 21:11:44 +0900 Subject: [PATCH 136/137] [DOCS] Querying basics: what can be graphed Put a scalar to query, it can be graphed. So the doc says "an expression that returns an instant vector is the only type which can be graphed." is not correct? And also, a query_range, which used for graph, always return a range vector , so it's confusing to read the above statement. 
Signed-off-by: Viet Hung Nguyen --- docs/querying/basics.md | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/docs/querying/basics.md b/docs/querying/basics.md index 66d7b8018d..e02ecf5496 100644 --- a/docs/querying/basics.md +++ b/docs/querying/basics.md @@ -35,8 +35,9 @@ evaluate to one of four types: Depending on the use-case (e.g. when graphing vs. displaying the output of an expression), only some of these types are legal as the result of a -user-specified expression. For example, an expression that returns an instant -vector is the only type which can be graphed. +user-specified expression. +For [instant queries](api.md#instant-queries), any of the above data types are allowed as the root of the expression. +[Range queries](api.md#range-queries) only support scalar-typed and instant-vector-typed expressions. _Notes about the experimental native histograms:_ From 8650d25804ac1f38158c52f1dcf37af82e234745 Mon Sep 17 00:00:00 2001 From: Fiona Liao Date: Tue, 8 Oct 2024 13:34:32 +0100 Subject: [PATCH 137/137] Add additional basic nhcb unit tests (#15086) * Add additional basic nhcb unit tests * Update promql/promqltest/testdata/histograms.test Signed-off-by: Fiona Liao Signed-off-by: Fiona Liao Co-authored-by: George Krajcsovits --- promql/promqltest/testdata/histograms.test | 52 ++++++++++++++++++++++ 1 file changed, 52 insertions(+) diff --git a/promql/promqltest/testdata/histograms.test b/promql/promqltest/testdata/histograms.test index 68232a815d..6089fd01d2 100644 --- a/promql/promqltest/testdata/histograms.test +++ b/promql/promqltest/testdata/histograms.test @@ -421,6 +421,25 @@ eval instant at 50m histogram_quantile(0.5, rate(request_duration_seconds_bucket eval instant at 50m sum(request_duration_seconds) {} {{schema:-53 count:250 custom_values:[0.1 0.2] buckets:[100 90 60]}} +eval instant at 50m sum(request_duration_seconds{job="job1",instance="ins1"} + ignoring(job,instance) request_duration_seconds{job="job1",instance="ins2"} + 
ignoring(job,instance) request_duration_seconds{job="job2",instance="ins1"} + ignoring(job,instance) request_duration_seconds{job="job2",instance="ins2"}) + {} {{schema:-53 count:250 custom_values:[0.1 0.2] buckets:[100 90 60]}} + +eval instant at 50m avg(request_duration_seconds) + {} {{schema:-53 count:62.5 custom_values:[0.1 0.2] buckets:[25 22.5 15]}} + +# To verify the result above, calculate from classic histogram as well. +eval instant at 50m avg (request_duration_seconds_bucket{le="0.1"}) + {} 25 + +eval instant at 50m avg (request_duration_seconds_bucket{le="0.2"}) - avg (request_duration_seconds_bucket{le="0.1"}) + {} 22.5 + +eval instant at 50m avg (request_duration_seconds_bucket{le="+Inf"}) - avg (request_duration_seconds_bucket{le="0.2"}) + {} 15 + +eval instant at 50m count(request_duration_seconds) + {} 4 + # A histogram with nonmonotonic bucket counts. This may happen when recording # rule evaluation or federation races scrape ingestion, causing some buckets # counts to be derived from fewer samples. @@ -504,3 +523,36 @@ eval instant at 5m histogram_quantile(1.0, sum by (le) (rate(const_histogram_buc eval instant at 5m histogram_quantile(1.0, sum(rate(const_histogram[5m]))) {} NaN + +load_with_nhcb 1m + histogram_over_time_bucket{le="0"} 0 1 3 9 + histogram_over_time_bucket{le="1"} 2 3 3 9 + histogram_over_time_bucket{le="2"} 3 8 5 10 + histogram_over_time_bucket{le="4"} 3 10 6 18 + +# Test custom buckets with sum_over_time, avg_over_time. 
+eval instant at 3m sum_over_time(histogram_over_time[4m:1m]) + {} {{schema:-53 count:37 custom_values:[0 1 2 4] buckets:[13 4 9 11]}} + +eval instant at 3m avg_over_time(histogram_over_time[4m:1m]) + {} {{schema:-53 count:9.25 custom_values:[0 1 2 4] buckets:[3.25 1 2.25 2.75]}} + +# Test custom buckets with counter reset +load_with_nhcb 5m + histogram_with_reset_bucket{le="1"} 1 3 9 + histogram_with_reset_bucket{le="2"} 3 3 9 + histogram_with_reset_bucket{le="4"} 8 5 12 + histogram_with_reset_bucket{le="8"} 10 6 18 + histogram_with_reset_sum{} 36 16 61 + +eval instant at 10m increase(histogram_with_reset[15m]) + {} {{schema:-53 count:27 sum:91.5 custom_values:[1 2 4 8] counter_reset_hint:gauge buckets:[13.5 0 4.5 9]}} + +eval instant at 10m resets(histogram_with_reset[15m]) + {} 1 + +eval instant at 10m histogram_count(increase(histogram_with_reset[15m])) + {} 27 + +eval instant at 10m histogram_sum(increase(histogram_with_reset[15m])) + {} 91.5