From fb67d368a218d7279fe63d791f48a33e88113ad7 Mon Sep 17 00:00:00 2001 From: Ben Ye Date: Tue, 11 Apr 2023 13:45:34 -0700 Subject: [PATCH 01/26] use consistent error for instant and range query 400 Signed-off-by: Ben Ye --- web/api/v1/api.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/web/api/v1/api.go b/web/api/v1/api.go index 3b6ade562a..a210e00017 100644 --- a/web/api/v1/api.go +++ b/web/api/v1/api.go @@ -522,7 +522,7 @@ func (api *API) queryRange(r *http.Request) (result apiFuncResult) { } qry, err := api.QueryEngine.NewRangeQuery(api.Queryable, opts, r.FormValue("query"), start, end, step) if err != nil { - return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil} + return invalidParamError(err, "query") } // From now on, we must only return with a finalizer in the result (to // be called by the caller) or call qry.Close ourselves (which is From b6573353c1495da061c25e6a08859fd3ffcd47a4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Mierzwa?= Date: Wed, 12 Apr 2023 14:05:06 +0100 Subject: [PATCH 02/26] Add query_samples_total metric MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit query_samples_total is a counter that tracks the total number of samples loaded by all queries. The goal with this metric is to be able to see the amount of 'work' done by Prometheus to service queries. At the moment we have metrics with the number of queries, plus more detailed metrics showing how much time each step of a query takes. While those metrics do help they don't show us the whole picture. Queries that do load more samples are (in general) more expensive than queries that do load fewer samples. This means that looking only at the number of queries doesn't tell us how much 'work' Prometheus received. Adding a counter that tracks the total number of samples loaded allows us to see if there was a spike in the cost of queries, not just the number of them. 
Signed-off-by: Ɓukasz Mierzwa --- promql/engine.go | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/promql/engine.go b/promql/engine.go index 52278c7a24..60a575508a 100644 --- a/promql/engine.go +++ b/promql/engine.go @@ -70,6 +70,7 @@ type engineMetrics struct { queryPrepareTime prometheus.Observer queryInnerEval prometheus.Observer queryResultSort prometheus.Observer + querySamples prometheus.Counter } // convertibleToInt64 returns true if v does not over-/underflow an int64. @@ -332,6 +333,12 @@ func NewEngine(opts EngineOpts) *Engine { Name: "queries_concurrent_max", Help: "The max number of concurrent queries.", }), + querySamples: prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "query_samples_total", + Help: "The total number of samples loaded by all queries.", + }), queryQueueTime: queryResultSummary.WithLabelValues("queue_time"), queryPrepareTime: queryResultSummary.WithLabelValues("prepare_time"), queryInnerEval: queryResultSummary.WithLabelValues("inner_eval"), @@ -357,6 +364,7 @@ func NewEngine(opts EngineOpts) *Engine { metrics.maxConcurrentQueries, metrics.queryLogEnabled, metrics.queryLogFailures, + metrics.querySamples, queryResultSummary, ) } @@ -537,7 +545,10 @@ func (ng *Engine) newTestQuery(f func(context.Context) error) Query { // statements are not handled by the Engine. 
func (ng *Engine) exec(ctx context.Context, q *query) (v parser.Value, ws storage.Warnings, err error) { ng.metrics.currentQueries.Inc() - defer ng.metrics.currentQueries.Dec() + defer func() { + ng.metrics.currentQueries.Dec() + ng.metrics.querySamples.Add(float64(q.sampleStats.TotalSamples)) + }() ctx, cancel := context.WithTimeout(ctx, ng.timeout) q.cancel = cancel From fb3eb212306cad9c9552ccef60318d5c3c8fff19 Mon Sep 17 00:00:00 2001 From: Matthieu MOREL Date: Sun, 9 Apr 2023 09:08:40 +0200 Subject: [PATCH 03/26] enable gocritic, unconvert and unused linters Signed-off-by: Matthieu MOREL --- .github/workflows/ci.yml | 2 ++ .golangci.yml | 11 ++++++- cmd/prometheus/main.go | 2 +- cmd/prometheus/query_log_test.go | 2 +- cmd/promtool/backfill.go | 2 +- cmd/promtool/rules.go | 2 +- cmd/promtool/unittest.go | 2 +- discovery/kubernetes/endpointslice.go | 2 +- discovery/kubernetes/kubernetes.go | 7 +++-- discovery/legacymanager/registry.go | 2 +- discovery/linode/linode.go | 8 ++--- discovery/marathon/marathon.go | 7 +++-- discovery/ovhcloud/dedicated_server_test.go | 6 ++-- discovery/ovhcloud/vps_test.go | 6 ++-- discovery/registry.go | 2 +- discovery/vultr/vultr.go | 4 +-- .../remote_storage_adapter/influxdb/client.go | 11 ++++--- model/labels/labels.go | 14 +++++---- model/textparse/promparse_test.go | 2 +- promql/bench_test.go | 6 ++-- promql/engine.go | 24 +++++++------- promql/engine_test.go | 2 +- promql/functions.go | 8 ++--- promql/functions_test.go | 2 +- promql/parser/ast.go | 11 ++++--- promql/parser/parse.go | 14 ++++----- promql/parser/parse_test.go | 2 +- promql/parser/printer.go | 21 +++++++------ rules/manager.go | 7 +++-- rules/manager_test.go | 7 +++-- scrape/scrape.go | 6 ++-- scrape/scrape_test.go | 27 ++++++++-------- scrape/target.go | 4 +-- storage/merge.go | 7 +++-- storage/remote/codec.go | 27 ++++++++-------- storage/remote/queue_manager.go | 8 ++--- template/template.go | 2 +- tsdb/agent/db_test.go | 3 +- tsdb/block_test.go | 4 +-- 
tsdb/chunkenc/bstream.go | 6 ++-- tsdb/chunkenc/float_histogram.go | 2 +- tsdb/chunkenc/histogram.go | 6 ++-- tsdb/chunkenc/varbit.go | 2 +- tsdb/chunkenc/xor.go | 12 +++---- tsdb/chunks/head_chunks_test.go | 4 +-- tsdb/compact.go | 2 +- tsdb/compact_test.go | 6 ---- tsdb/db.go | 6 ++-- tsdb/db_test.go | 4 +-- tsdb/exemplar.go | 18 +++++------ tsdb/head.go | 2 +- tsdb/head_test.go | 31 ++++++++++--------- tsdb/head_wal.go | 4 +-- tsdb/index/index.go | 2 +- tsdb/index/postings.go | 13 ++++---- tsdb/index/postingsstats.go | 6 ++-- tsdb/isolation.go | 2 +- tsdb/querier.go | 26 +++++++++------- tsdb/querier_test.go | 4 +-- tsdb/wal.go | 4 +-- tsdb/wlog/wlog_test.go | 4 +-- util/runtime/limits_default.go | 2 +- util/runtime/statfs_default.go | 8 +++-- web/api/v1/api.go | 4 +-- web/api/v1/api_test.go | 4 +-- web/web.go | 4 +-- 66 files changed, 245 insertions(+), 229 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 17185c5bc0..07b1242c28 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -149,6 +149,8 @@ jobs: - name: Lint uses: golangci/golangci-lint-action@v3.4.0 with: + args: --verbose + skip-cache: true version: v1.51.2 fuzzing: uses: ./.github/workflows/fuzzing.yml diff --git a/.golangci.yml b/.golangci.yml index efa6b2044d..c0c20d425a 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -1,5 +1,5 @@ run: - deadline: 5m + timeout: 15m skip-files: # Skip autogenerated files. 
- ^.*\.(pb|y)\.go$ @@ -10,14 +10,23 @@ output: linters: enable: - depguard + - gocritic - gofumpt - goimports - revive - misspell + - unconvert + - unused issues: max-same-issues: 0 exclude-rules: + - linters: + - gocritic + text: "appendAssign" + - linters: + - gocritic + text: "singleCaseSwitch" - path: _test.go linters: - errcheck diff --git a/cmd/prometheus/main.go b/cmd/prometheus/main.go index f4f6af20df..cbe8f503d0 100644 --- a/cmd/prometheus/main.go +++ b/cmd/prometheus/main.go @@ -490,7 +490,7 @@ func main() { if cfgFile.StorageConfig.ExemplarsConfig == nil { cfgFile.StorageConfig.ExemplarsConfig = &config.DefaultExemplarsConfig } - cfg.tsdb.MaxExemplars = int64(cfgFile.StorageConfig.ExemplarsConfig.MaxExemplars) + cfg.tsdb.MaxExemplars = cfgFile.StorageConfig.ExemplarsConfig.MaxExemplars } if cfgFile.StorageConfig.TSDBConfig != nil { cfg.tsdb.OutOfOrderTimeWindow = cfgFile.StorageConfig.TSDBConfig.OutOfOrderTimeWindow diff --git a/cmd/prometheus/query_log_test.go b/cmd/prometheus/query_log_test.go index d5dfbea509..f20f2a22c0 100644 --- a/cmd/prometheus/query_log_test.go +++ b/cmd/prometheus/query_log_test.go @@ -193,7 +193,7 @@ func (p *queryLogTest) String() string { } name = name + ", " + p.host + ":" + strconv.Itoa(p.port) if p.enabledAtStart { - name = name + ", enabled at start" + name += ", enabled at start" } if p.prefix != "" { name = name + ", with prefix " + p.prefix diff --git a/cmd/promtool/backfill.go b/cmd/promtool/backfill.go index 3c23d2c037..39410881b2 100644 --- a/cmd/promtool/backfill.go +++ b/cmd/promtool/backfill.go @@ -101,7 +101,7 @@ func createBlocks(input []byte, mint, maxt, maxBlockDuration int64, maxSamplesIn nextSampleTs int64 = math.MaxInt64 ) - for t := mint; t <= maxt; t = t + blockDuration { + for t := mint; t <= maxt; t += blockDuration { tsUpper := t + blockDuration if nextSampleTs != math.MaxInt64 && nextSampleTs >= tsUpper { // The next sample is not in this timerange, we can avoid parsing diff --git 
a/cmd/promtool/rules.go b/cmd/promtool/rules.go index aedc7bcb9d..e430fe1898 100644 --- a/cmd/promtool/rules.go +++ b/cmd/promtool/rules.go @@ -100,7 +100,7 @@ func (importer *ruleImporter) importRule(ctx context.Context, ruleExpr, ruleName startInMs := start.Unix() * int64(time.Second/time.Millisecond) endInMs := end.Unix() * int64(time.Second/time.Millisecond) - for startOfBlock := blockDuration * (startInMs / blockDuration); startOfBlock <= endInMs; startOfBlock = startOfBlock + blockDuration { + for startOfBlock := blockDuration * (startInMs / blockDuration); startOfBlock <= endInMs; startOfBlock += blockDuration { endOfBlock := startOfBlock + blockDuration - 1 currStart := max(startOfBlock/int64(time.Second/time.Millisecond), start.Unix()) diff --git a/cmd/promtool/unittest.go b/cmd/promtool/unittest.go index b3e6f67f97..84dfd9ec79 100644 --- a/cmd/promtool/unittest.go +++ b/cmd/promtool/unittest.go @@ -130,7 +130,7 @@ func resolveAndGlobFilepaths(baseDir string, utf *unitTestFile) error { if err != nil { return err } - if len(m) <= 0 { + if len(m) == 0 { fmt.Fprintln(os.Stderr, " WARNING: no file match pattern", rf) } globbedFiles = append(globbedFiles, m...) 
diff --git a/discovery/kubernetes/endpointslice.go b/discovery/kubernetes/endpointslice.go index 135735154c..d5bff8a5f6 100644 --- a/discovery/kubernetes/endpointslice.go +++ b/discovery/kubernetes/endpointslice.go @@ -300,7 +300,7 @@ func (e *EndpointSlice) buildEndpointSlice(eps endpointSliceAdaptor) *targetgrou } if port.protocol() != nil { - target[endpointSlicePortProtocolLabel] = lv(string(*port.protocol())) + target[endpointSlicePortProtocolLabel] = lv(*port.protocol()) } if port.port() != nil { diff --git a/discovery/kubernetes/kubernetes.go b/discovery/kubernetes/kubernetes.go index 0f03e2cdb7..a44bd513ce 100644 --- a/discovery/kubernetes/kubernetes.go +++ b/discovery/kubernetes/kubernetes.go @@ -299,12 +299,13 @@ func New(l log.Logger, conf *SDConfig) (*Discovery, error) { err error ownNamespace string ) - if conf.KubeConfig != "" { + switch { + case conf.KubeConfig != "": kcfg, err = clientcmd.BuildConfigFromFlags("", conf.KubeConfig) if err != nil { return nil, err } - } else if conf.APIServer.URL == nil { + case conf.APIServer.URL == nil: // Use the Kubernetes provided pod service account // as described in https://kubernetes.io/docs/admin/service-accounts-admin/ kcfg, err = rest.InClusterConfig() @@ -324,7 +325,7 @@ func New(l log.Logger, conf *SDConfig) (*Discovery, error) { } level.Info(l).Log("msg", "Using pod service account via in-cluster config") - } else { + default: rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "kubernetes_sd") if err != nil { return nil, err diff --git a/discovery/legacymanager/registry.go b/discovery/legacymanager/registry.go index 687f093829..955705394d 100644 --- a/discovery/legacymanager/registry.go +++ b/discovery/legacymanager/registry.go @@ -254,7 +254,7 @@ func replaceYAMLTypeError(err error, oldTyp, newTyp reflect.Type) error { oldStr := oldTyp.String() newStr := newTyp.String() for i, s := range e.Errors { - e.Errors[i] = strings.Replace(s, oldStr, newStr, -1) + e.Errors[i] = 
strings.ReplaceAll(s, oldStr, newStr) } } return err diff --git a/discovery/linode/linode.go b/discovery/linode/linode.go index 0fd0a2c370..12b9575143 100644 --- a/discovery/linode/linode.go +++ b/discovery/linode/linode.go @@ -249,20 +249,20 @@ func (d *Discovery) refreshData(ctx context.Context) ([]*targetgroup.Group, erro if detailedIP.Address != ip.String() { continue } - - if detailedIP.Public && publicIPv4 == "" { + switch { + case detailedIP.Public && publicIPv4 == "": publicIPv4 = detailedIP.Address if detailedIP.RDNS != "" && detailedIP.RDNS != "null" { publicIPv4RDNS = detailedIP.RDNS } - } else if !detailedIP.Public && privateIPv4 == "" { + case !detailedIP.Public && privateIPv4 == "": privateIPv4 = detailedIP.Address if detailedIP.RDNS != "" && detailedIP.RDNS != "null" { privateIPv4RDNS = detailedIP.RDNS } - } else { + default: extraIPs = append(extraIPs, detailedIP.Address) } } diff --git a/discovery/marathon/marathon.go b/discovery/marathon/marathon.go index 079f93ad0b..c31daee1f4 100644 --- a/discovery/marathon/marathon.go +++ b/discovery/marathon/marathon.go @@ -400,19 +400,20 @@ func targetsForApp(app *app) []model.LabelSet { var labels []map[string]string var prefix string - if len(app.Container.PortMappings) != 0 { + switch { + case len(app.Container.PortMappings) != 0: // In Marathon 1.5.x the "container.docker.portMappings" object was moved // to "container.portMappings". ports, labels = extractPortMapping(app.Container.PortMappings, app.isContainerNet()) prefix = portMappingLabelPrefix - } else if len(app.Container.Docker.PortMappings) != 0 { + case len(app.Container.Docker.PortMappings) != 0: // Prior to Marathon 1.5 the port mappings could be found at the path // "container.docker.portMappings". 
ports, labels = extractPortMapping(app.Container.Docker.PortMappings, app.isContainerNet()) prefix = portMappingLabelPrefix - } else if len(app.PortDefinitions) != 0 { + case len(app.PortDefinitions) != 0: // PortDefinitions deprecates the "ports" array and can be used to specify // a list of ports with metadata in case a mapping is not required. ports = make([]uint32, len(app.PortDefinitions)) diff --git a/discovery/ovhcloud/dedicated_server_test.go b/discovery/ovhcloud/dedicated_server_test.go index 03a01005a9..e8ffa4a283 100644 --- a/discovery/ovhcloud/dedicated_server_test.go +++ b/discovery/ovhcloud/dedicated_server_test.go @@ -84,7 +84,7 @@ func MockDedicatedAPI(w http.ResponseWriter, r *http.Request) { return } w.Header().Set("Content-Type", "application/json") - if string(r.URL.Path) == "/dedicated/server" { + if r.URL.Path == "/dedicated/server" { dedicatedServersList, err := os.ReadFile("testdata/dedicated_server/dedicated_servers.json") if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) @@ -96,7 +96,7 @@ func MockDedicatedAPI(w http.ResponseWriter, r *http.Request) { return } } - if string(r.URL.Path) == "/dedicated/server/abcde" { + if r.URL.Path == "/dedicated/server/abcde" { dedicatedServer, err := os.ReadFile("testdata/dedicated_server/dedicated_servers_details.json") if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) @@ -108,7 +108,7 @@ func MockDedicatedAPI(w http.ResponseWriter, r *http.Request) { return } } - if string(r.URL.Path) == "/dedicated/server/abcde/ips" { + if r.URL.Path == "/dedicated/server/abcde/ips" { dedicatedServerIPs, err := os.ReadFile("testdata/dedicated_server/dedicated_servers_abcde_ips.json") if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) diff --git a/discovery/ovhcloud/vps_test.go b/discovery/ovhcloud/vps_test.go index 31b30fdfc6..b1177f215e 100644 --- a/discovery/ovhcloud/vps_test.go +++ b/discovery/ovhcloud/vps_test.go @@ -91,7 +91,7 @@ func 
MockVpsAPI(w http.ResponseWriter, r *http.Request) { return } w.Header().Set("Content-Type", "application/json") - if string(r.URL.Path) == "/vps" { + if r.URL.Path == "/vps" { dedicatedServersList, err := os.ReadFile("testdata/vps/vps.json") if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) @@ -103,7 +103,7 @@ func MockVpsAPI(w http.ResponseWriter, r *http.Request) { return } } - if string(r.URL.Path) == "/vps/abc" { + if r.URL.Path == "/vps/abc" { dedicatedServer, err := os.ReadFile("testdata/vps/vps_details.json") if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) @@ -115,7 +115,7 @@ func MockVpsAPI(w http.ResponseWriter, r *http.Request) { return } } - if string(r.URL.Path) == "/vps/abc/ips" { + if r.URL.Path == "/vps/abc/ips" { dedicatedServerIPs, err := os.ReadFile("testdata/vps/vps_abc_ips.json") if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) diff --git a/discovery/registry.go b/discovery/registry.go index 8274628c23..13168a07a7 100644 --- a/discovery/registry.go +++ b/discovery/registry.go @@ -253,7 +253,7 @@ func replaceYAMLTypeError(err error, oldTyp, newTyp reflect.Type) error { oldStr := oldTyp.String() newStr := newTyp.String() for i, s := range e.Errors { - e.Errors[i] = strings.Replace(s, oldStr, newStr, -1) + e.Errors[i] = strings.ReplaceAll(s, oldStr, newStr) } } return err diff --git a/discovery/vultr/vultr.go b/discovery/vultr/vultr.go index 2f489e7d45..42881d3c19 100644 --- a/discovery/vultr/vultr.go +++ b/discovery/vultr/vultr.go @@ -202,10 +202,8 @@ func (d *Discovery) listInstances(ctx context.Context) ([]govultr.Instance, erro if meta.Links.Next == "" { break - } else { - listOptions.Cursor = meta.Links.Next - continue } + listOptions.Cursor = meta.Links.Next } return instances, nil diff --git a/documentation/examples/remote_storage/remote_storage_adapter/influxdb/client.go b/documentation/examples/remote_storage/remote_storage_adapter/influxdb/client.go index 
fffbc9c2ae..e84ed9e129 100644 --- a/documentation/examples/remote_storage/remote_storage_adapter/influxdb/client.go +++ b/documentation/examples/remote_storage/remote_storage_adapter/influxdb/client.go @@ -184,11 +184,11 @@ func (c *Client) buildCommand(q *prompb.Query) (string, error) { } func escapeSingleQuotes(str string) string { - return strings.Replace(str, `'`, `\'`, -1) + return strings.ReplaceAll(str, `'`, `\'`) } func escapeSlashes(str string) string { - return strings.Replace(str, `/`, `\/`, -1) + return strings.ReplaceAll(str, `/`, `\/`) } func mergeResult(labelsToSeries map[string]*prompb.TimeSeries, results []influx.Result) error { @@ -290,13 +290,14 @@ func mergeSamples(a, b []prompb.Sample) []prompb.Sample { result := make([]prompb.Sample, 0, len(a)+len(b)) i, j := 0, 0 for i < len(a) && j < len(b) { - if a[i].Timestamp < b[j].Timestamp { + switch { + case a[i].Timestamp < b[j].Timestamp: result = append(result, a[i]) i++ - } else if a[i].Timestamp > b[j].Timestamp { + case a[i].Timestamp > b[j].Timestamp: result = append(result, b[j]) j++ - } else { + default: result = append(result, a[i]) i++ j++ diff --git a/model/labels/labels.go b/model/labels/labels.go index b7398d17f9..93524ddcfc 100644 --- a/model/labels/labels.go +++ b/model/labels/labels.go @@ -169,11 +169,12 @@ func (ls Labels) HashForLabels(b []byte, names ...string) (uint64, []byte) { b = b[:0] i, j := 0, 0 for i < len(ls) && j < len(names) { - if names[j] < ls[i].Name { + switch { + case names[j] < ls[i].Name: j++ - } else if ls[i].Name < names[j] { + case ls[i].Name < names[j]: i++ - } else { + default: b = append(b, ls[i].Name...) b = append(b, seps[0]) b = append(b, ls[i].Value...) 
@@ -213,11 +214,12 @@ func (ls Labels) BytesWithLabels(buf []byte, names ...string) []byte { b.WriteByte(labelSep) i, j := 0, 0 for i < len(ls) && j < len(names) { - if names[j] < ls[i].Name { + switch { + case names[j] < ls[i].Name: j++ - } else if ls[i].Name < names[j] { + case ls[i].Name < names[j]: i++ - } else { + default: if b.Len() > 1 { b.WriteByte(seps[0]) } diff --git a/model/textparse/promparse_test.go b/model/textparse/promparse_test.go index e0ecf62f5d..280f39b4f1 100644 --- a/model/textparse/promparse_test.go +++ b/model/textparse/promparse_test.go @@ -512,7 +512,7 @@ func BenchmarkGzip(b *testing.B) { k := b.N / promtestdataSampleCount b.ReportAllocs() - b.SetBytes(int64(n) / promtestdataSampleCount) + b.SetBytes(n / promtestdataSampleCount) b.ResetTimer() total := 0 diff --git a/promql/bench_test.go b/promql/bench_test.go index 88025d9325..d197da8881 100644 --- a/promql/bench_test.go +++ b/promql/bench_test.go @@ -194,9 +194,9 @@ func rangeQueryCases() []benchCase { if !strings.Contains(c.expr, "X") { tmp = append(tmp, c) } else { - tmp = append(tmp, benchCase{expr: strings.Replace(c.expr, "X", "one", -1), steps: c.steps}) - tmp = append(tmp, benchCase{expr: strings.Replace(c.expr, "X", "ten", -1), steps: c.steps}) - tmp = append(tmp, benchCase{expr: strings.Replace(c.expr, "X", "hundred", -1), steps: c.steps}) + tmp = append(tmp, benchCase{expr: strings.ReplaceAll(c.expr, "X", "one"), steps: c.steps}) + tmp = append(tmp, benchCase{expr: strings.ReplaceAll(c.expr, "X", "ten"), steps: c.steps}) + tmp = append(tmp, benchCase{expr: strings.ReplaceAll(c.expr, "X", "hundred"), steps: c.steps}) } } cases = tmp diff --git a/promql/engine.go b/promql/engine.go index b49be244f1..4dfa6b1192 100644 --- a/promql/engine.go +++ b/promql/engine.go @@ -783,7 +783,6 @@ func (ng *Engine) findMinMaxTime(s *parser.EvalStmt) (int64, int64) { maxTimestamp = end } evalRange = 0 - case *parser.MatrixSelector: evalRange = n.Range } @@ -816,20 +815,20 @@ func (ng *Engine) 
getTimeRangesForSelector(s *parser.EvalStmt, n *parser.VectorS } else { offsetMilliseconds := durationMilliseconds(subqOffset) start = start - offsetMilliseconds - durationMilliseconds(subqRange) - end = end - offsetMilliseconds + end -= offsetMilliseconds } if evalRange == 0 { - start = start - durationMilliseconds(s.LookbackDelta) + start -= durationMilliseconds(s.LookbackDelta) } else { // For all matrix queries we want to ensure that we have (end-start) + range selected // this way we have `range` data before the start time - start = start - durationMilliseconds(evalRange) + start -= durationMilliseconds(evalRange) } offsetMilliseconds := durationMilliseconds(n.OriginalOffset) - start = start - offsetMilliseconds - end = end - offsetMilliseconds + start -= offsetMilliseconds + end -= offsetMilliseconds return start, end } @@ -1745,7 +1744,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { res, ws := newEv.eval(e.Expr) ev.currentSamples = newEv.currentSamples ev.samplesStats.UpdatePeakFromSubquery(newEv.samplesStats) - for ts, step := ev.startTimestamp, -1; ts <= ev.endTimestamp; ts = ts + ev.interval { + for ts, step := ev.startTimestamp, -1; ts <= ev.endTimestamp; ts += ev.interval { step++ ev.samplesStats.IncrementSamplesAtStep(step, newEv.samplesStats.TotalSamples) } @@ -1767,7 +1766,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { if len(mat[i].Floats)+len(mat[i].Histograms) != 1 { panic(fmt.Errorf("unexpected number of samples")) } - for ts := ev.startTimestamp + ev.interval; ts <= ev.endTimestamp; ts = ts + ev.interval { + for ts := ev.startTimestamp + ev.interval; ts <= ev.endTimestamp; ts += ev.interval { if len(mat[i].Floats) > 0 { mat[i].Floats = append(mat[i].Floats, FPoint{ T: ts, @@ -2514,14 +2513,15 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without if !ok { var m labels.Labels enh.resetBuilder(metric) - if without { + switch { + case without: 
enh.lb.Del(grouping...) enh.lb.Del(labels.MetricName) m = enh.lb.Labels() - } else if len(grouping) > 0 { + case len(grouping) > 0: enh.lb.Keep(grouping...) m = enh.lb.Labels() - } else { + default: m = labels.EmptyLabels() } newAgg := &groupedAggregation{ @@ -2689,7 +2689,7 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without aggr.floatValue = float64(aggr.groupCount) case parser.STDVAR: - aggr.floatValue = aggr.floatValue / float64(aggr.groupCount) + aggr.floatValue /= float64(aggr.groupCount) case parser.STDDEV: aggr.floatValue = math.Sqrt(aggr.floatValue / float64(aggr.groupCount)) diff --git a/promql/engine_test.go b/promql/engine_test.go index b64e32ba46..056fd23555 100644 --- a/promql/engine_test.go +++ b/promql/engine_test.go @@ -3267,7 +3267,7 @@ func TestNativeHistogram_HistogramCountAndSum(t *testing.T) { require.Len(t, vector, 1) require.Nil(t, vector[0].H) if floatHisto { - require.Equal(t, float64(h.ToFloat().Count), vector[0].F) + require.Equal(t, h.ToFloat().Count, vector[0].F) } else { require.Equal(t, float64(h.Count), vector[0].F) } diff --git a/promql/functions.go b/promql/functions.go index fd99703df2..0e7a601e3d 100644 --- a/promql/functions.go +++ b/promql/functions.go @@ -877,10 +877,10 @@ func linearRegression(samples []FPoint, interceptTime int64) (slope, intercept f } return 0, initY } - sumX = sumX + cX - sumY = sumY + cY - sumXY = sumXY + cXY - sumX2 = sumX2 + cX2 + sumX += cX + sumY += cY + sumXY += cXY + sumX2 += cX2 covXY := sumXY - sumX*sumY/n varX := sumX2 - sumX*sumX/n diff --git a/promql/functions_test.go b/promql/functions_test.go index e552424b3d..8181481c0e 100644 --- a/promql/functions_test.go +++ b/promql/functions_test.go @@ -51,7 +51,7 @@ func TestDeriv(t *testing.T) { // https://github.com/prometheus/prometheus/issues/7180 for i = 0; i < 15; i++ { jitter := 12 * i % 2 - a.Append(0, metric, int64(start+interval*i+jitter), 1) + a.Append(0, metric, start+interval*i+jitter, 1) } 
require.NoError(t, a.Commit()) diff --git a/promql/parser/ast.go b/promql/parser/ast.go index 190af2d590..86f1394998 100644 --- a/promql/parser/ast.go +++ b/promql/parser/ast.go @@ -349,7 +349,7 @@ func (f inspector) Visit(node Node, path []Node) (Visitor, error) { // for all the non-nil children of node, recursively. func Inspect(node Node, f inspector) { //nolint: errcheck - Walk(inspector(f), node, nil) + Walk(f, node, nil) } // Children returns a list of all child nodes of a syntax tree node. @@ -368,13 +368,14 @@ func Children(node Node) []Node { case *AggregateExpr: // While this does not look nice, it should avoid unnecessary allocations // caused by slice resizing - if n.Expr == nil && n.Param == nil { + switch { + case n.Expr == nil && n.Param == nil: return nil - } else if n.Expr == nil { + case n.Expr == nil: return []Node{n.Param} - } else if n.Param == nil { + case n.Param == nil: return []Node{n.Expr} - } else { + default: return []Node{n.Expr, n.Param} } case *BinaryExpr: diff --git a/promql/parser/parse.go b/promql/parser/parse.go index 6c37ce6fc6..fa28097b28 100644 --- a/promql/parser/parse.go +++ b/promql/parser/parse.go @@ -523,15 +523,13 @@ func (p *parser) checkAST(node Node) (typ ValueType) { p.addParseErrf(n.PositionRange(), "vector matching only allowed between instant vectors") } n.VectorMatching = nil - } else { // Both operands are Vectors. 
- if n.Op.IsSetOperator() { - if n.VectorMatching.Card == CardOneToMany || n.VectorMatching.Card == CardManyToOne { - p.addParseErrf(n.PositionRange(), "no grouping allowed for %q operation", n.Op) - } - if n.VectorMatching.Card != CardManyToMany { - p.addParseErrf(n.PositionRange(), "set operations must always be many-to-many") - } + } else if n.Op.IsSetOperator() { + if n.VectorMatching.Card == CardOneToMany || n.VectorMatching.Card == CardManyToOne { + p.addParseErrf(n.PositionRange(), "no grouping allowed for %q operation", n.Op) + } + if n.VectorMatching.Card != CardManyToMany { + p.addParseErrf(n.PositionRange(), "set operations must always be many-to-many") } } diff --git a/promql/parser/parse_test.go b/promql/parser/parse_test.go index df66d9381a..7e6870ddbb 100644 --- a/promql/parser/parse_test.go +++ b/promql/parser/parse_test.go @@ -3592,7 +3592,7 @@ func TestNaNExpression(t *testing.T) { nl, ok := expr.(*NumberLiteral) require.True(t, ok, "expected number literal but got %T", expr) - require.True(t, math.IsNaN(float64(nl.Val)), "expected 'NaN' in number literal but got %v", nl.Val) + require.True(t, math.IsNaN(nl.Val), "expected 'NaN' in number literal but got %v", nl.Val) } var testSeries = []struct { diff --git a/promql/parser/printer.go b/promql/parser/printer.go index 1f15eeef33..4fff193e16 100644 --- a/promql/parser/printer.go +++ b/promql/parser/printer.go @@ -130,11 +130,12 @@ func (node *MatrixSelector) String() string { offset = fmt.Sprintf(" offset -%s", model.Duration(-vecSelector.OriginalOffset)) } at := "" - if vecSelector.Timestamp != nil { + switch { + case vecSelector.Timestamp != nil: at = fmt.Sprintf(" @ %.3f", float64(*vecSelector.Timestamp)/1000.0) - } else if vecSelector.StartOrEnd == START { + case vecSelector.StartOrEnd == START: at = " @ start()" - } else if vecSelector.StartOrEnd == END { + case vecSelector.StartOrEnd == END: at = " @ end()" } @@ -168,11 +169,12 @@ func (node *SubqueryExpr) getSubqueryTimeSuffix() string { 
offset = fmt.Sprintf(" offset -%s", model.Duration(-node.OriginalOffset)) } at := "" - if node.Timestamp != nil { + switch { + case node.Timestamp != nil: at = fmt.Sprintf(" @ %.3f", float64(*node.Timestamp)/1000.0) - } else if node.StartOrEnd == START { + case node.StartOrEnd == START: at = " @ start()" - } else if node.StartOrEnd == END { + case node.StartOrEnd == END: at = " @ end()" } return fmt.Sprintf("[%s:%s]%s%s", model.Duration(node.Range), step, at, offset) @@ -213,11 +215,12 @@ func (node *VectorSelector) String() string { offset = fmt.Sprintf(" offset -%s", model.Duration(-node.OriginalOffset)) } at := "" - if node.Timestamp != nil { + switch { + case node.Timestamp != nil: at = fmt.Sprintf(" @ %.3f", float64(*node.Timestamp)/1000.0) - } else if node.StartOrEnd == START { + case node.StartOrEnd == START: at = " @ start()" - } else if node.StartOrEnd == END { + case node.StartOrEnd == END: at = " @ end()" } diff --git a/rules/manager.go b/rules/manager.go index 07d50be1b9..82bbfd3947 100644 --- a/rules/manager.go +++ b/rules/manager.go @@ -866,12 +866,13 @@ func (g *Group) RestoreForState(ts time.Time) { timeSpentPending := downAt.Sub(restoredActiveAt) timeRemainingPending := alertHoldDuration - timeSpentPending - if timeRemainingPending <= 0 { + switch { + case timeRemainingPending <= 0: // It means that alert was firing when prometheus went down. // In the next Eval, the state of this alert will be set back to // firing again if it's still firing in that Eval. // Nothing to be done in this case. 
- } else if timeRemainingPending < g.opts.ForGracePeriod { + case timeRemainingPending < g.opts.ForGracePeriod: // (new) restoredActiveAt = (ts + m.opts.ForGracePeriod) - alertHoldDuration // /* new firing time */ /* moving back by hold duration */ // @@ -884,7 +885,7 @@ func (g *Group) RestoreForState(ts time.Time) { // = (ts + m.opts.ForGracePeriod) - ts // = m.opts.ForGracePeriod restoredActiveAt = ts.Add(g.opts.ForGracePeriod).Add(-alertHoldDuration) - } else { + default: // By shifting ActiveAt to the future (ActiveAt + some_duration), // the total pending time from the original ActiveAt // would be `alertHoldDuration + some_duration`. diff --git a/rules/manager_test.go b/rules/manager_test.go index 440e06c9af..85a74ac52c 100644 --- a/rules/manager_test.go +++ b/rules/manager_test.go @@ -481,17 +481,18 @@ func TestForStateRestore(t *testing.T) { }) // Checking if we have restored it correctly. - if tst.noRestore { + switch { + case tst.noRestore: require.Equal(t, tst.num, len(got)) for _, e := range got { require.Equal(t, e.ActiveAt, restoreTime) } - } else if tst.gracePeriod { + case tst.gracePeriod: require.Equal(t, tst.num, len(got)) for _, e := range got { require.Equal(t, opts.ForGracePeriod, e.ActiveAt.Add(alertForDuration).Sub(restoreTime)) } - } else { + default: exp := tst.alerts require.Equal(t, len(exp), len(got)) sortAlerts(exp) diff --git a/scrape/scrape.go b/scrape/scrape.go index f38527ff30..15c8867932 100644 --- a/scrape/scrape.go +++ b/scrape/scrape.go @@ -640,7 +640,7 @@ func verifyLabelLimits(lset labels.Labels, limits *labelLimits) error { met := lset.Get(labels.MetricName) if limits.labelLimit > 0 { nbLabels := lset.Len() - if nbLabels > int(limits.labelLimit) { + if nbLabels > limits.labelLimit { return fmt.Errorf("label_limit exceeded (metric: %.50s, number of labels: %d, limit: %d)", met, nbLabels, limits.labelLimit) } } @@ -652,14 +652,14 @@ func verifyLabelLimits(lset labels.Labels, limits *labelLimits) error { return 
lset.Validate(func(l labels.Label) error { if limits.labelNameLengthLimit > 0 { nameLength := len(l.Name) - if nameLength > int(limits.labelNameLengthLimit) { + if nameLength > limits.labelNameLengthLimit { return fmt.Errorf("label_name_length_limit exceeded (metric: %.50s, label name: %.50s, length: %d, limit: %d)", met, l.Name, nameLength, limits.labelNameLengthLimit) } } if limits.labelValueLengthLimit > 0 { valueLength := len(l.Value) - if valueLength > int(limits.labelValueLengthLimit) { + if valueLength > limits.labelValueLengthLimit { return fmt.Errorf("label_value_length_limit exceeded (metric: %.50s, label name: %.50s, value: %.50q, length: %d, limit: %d)", met, l.Name, l.Value, valueLength, limits.labelValueLengthLimit) } } diff --git a/scrape/scrape_test.go b/scrape/scrape_test.go index dcb3b48c10..07b9c3c875 100644 --- a/scrape/scrape_test.go +++ b/scrape/scrape_test.go @@ -322,7 +322,7 @@ func TestScrapePoolReloadPreserveRelabeledIntervalTimeout(t *testing.T) { ScrapeTimeout: model.Duration(2 * time.Second), } newLoop := func(opts scrapeLoopOptions) loop { - l := &testLoop{interval: time.Duration(opts.interval), timeout: time.Duration(opts.timeout)} + l := &testLoop{interval: opts.interval, timeout: opts.timeout} l.startFunc = func(interval, timeout time.Duration, errc chan<- error) { require.Equal(t, 5*time.Second, interval, "Unexpected scrape interval") require.Equal(t, 3*time.Second, timeout, "Unexpected scrape timeout") @@ -546,7 +546,7 @@ func TestScrapePoolRaces(t *testing.T) { require.Equal(t, expectedDropped, len(dropped), "Invalid number of dropped targets") for i := 0; i < 20; i++ { - time.Sleep(time.Duration(10 * time.Millisecond)) + time.Sleep(10 * time.Millisecond) sp.reload(newConfig()) } sp.stop() @@ -1199,14 +1199,14 @@ func TestScrapeLoopRunCreatesStaleMarkersOnParseFailure(t *testing.T) { // Succeed once, several failures, then stop. 
scraper.scrapeFunc = func(ctx context.Context, w io.Writer) error { numScrapes++ - - if numScrapes == 1 { + switch { + case numScrapes == 1: w.Write([]byte("metric_a 42\n")) return nil - } else if numScrapes == 2 { + case numScrapes == 2: w.Write([]byte("7&-\n")) return nil - } else if numScrapes == 3 { + case numScrapes == 3: cancel() } return errors.New("scrape failed") @@ -1282,14 +1282,14 @@ func TestScrapeLoopCache(t *testing.T) { } numScrapes++ - - if numScrapes == 1 { + switch { + case numScrapes == 1: w.Write([]byte("metric_a 42\nmetric_b 43\n")) return nil - } else if numScrapes == 3 { + case numScrapes == 3: w.Write([]byte("metric_a 44\n")) return nil - } else if numScrapes == 4 { + case numScrapes == 4: cancel() } return fmt.Errorf("scrape failed") @@ -2280,11 +2280,12 @@ func TestTargetScrapeScrapeCancel(t *testing.T) { go func() { _, err := ts.scrape(ctx, io.Discard) - if err == nil { + switch { + case err == nil: errc <- errors.New("Expected error but got nil") - } else if ctx.Err() != context.Canceled { + case ctx.Err() != context.Canceled: errc <- errors.Errorf("Expected context cancellation error but got: %s", ctx.Err()) - } else { + default: close(errc) } }() diff --git a/scrape/target.go b/scrape/target.go index f250910c10..6c47031186 100644 --- a/scrape/target.go +++ b/scrape/target.go @@ -413,9 +413,9 @@ func PopulateLabels(lb *labels.Builder, cfg *config.ScrapeConfig, noDefaultPort // Addresses reaching this point are already wrapped in [] if necessary. 
switch scheme { case "http", "": - addr = addr + ":80" + addr += ":80" case "https": - addr = addr + ":443" + addr += ":443" default: return labels.EmptyLabels(), labels.EmptyLabels(), errors.Errorf("invalid scheme: %q", cfg.Scheme) } diff --git a/storage/merge.go b/storage/merge.go index 8db1f7ae83..23a92df1e2 100644 --- a/storage/merge.go +++ b/storage/merge.go @@ -197,13 +197,14 @@ func mergeStrings(a, b []string) []string { res := make([]string, 0, maxl*10/9) for len(a) > 0 && len(b) > 0 { - if a[0] == b[0] { + switch { + case a[0] == b[0]: res = append(res, a[0]) a, b = a[1:], b[1:] - } else if a[0] < b[0] { + case a[0] < b[0]: res = append(res, a[0]) a = a[1:] - } else { + default: res = append(res, b[0]) b = b[1:] } diff --git a/storage/remote/codec.go b/storage/remote/codec.go index bfbd08d24b..02c84a3e6c 100644 --- a/storage/remote/codec.go +++ b/storage/remote/codec.go @@ -291,13 +291,14 @@ func MergeLabels(primary, secondary []prompb.Label) []prompb.Label { result := make([]prompb.Label, 0, len(primary)+len(secondary)) i, j := 0, 0 for i < len(primary) && j < len(secondary) { - if primary[i].Name < secondary[j].Name { + switch { + case primary[i].Name < secondary[j].Name: result = append(result, primary[i]) i++ - } else if primary[i].Name > secondary[j].Name { + case primary[i].Name > secondary[j].Name: result = append(result, secondary[j]) j++ - } else { + default: result = append(result, primary[i]) i++ j++ @@ -428,8 +429,8 @@ func (c *concreteSeriesIterator) Seek(t int64) chunkenc.ValueType { c.histogramsCur += sort.Search(len(c.series.histograms)-c.histogramsCur, func(n int) bool { return c.series.histograms[n+c.histogramsCur].Timestamp >= t }) - - if c.floatsCur < len(c.series.floats) && c.histogramsCur < len(c.series.histograms) { + switch { + case c.floatsCur < len(c.series.floats) && c.histogramsCur < len(c.series.histograms): // If float samples and histogram samples have overlapping timestamps prefer the float samples. 
if c.series.floats[c.floatsCur].Timestamp <= c.series.histograms[c.histogramsCur].Timestamp { c.curValType = chunkenc.ValFloat @@ -445,12 +446,11 @@ func (c *concreteSeriesIterator) Seek(t int64) chunkenc.ValueType { c.floatsCur-- } } - } else if c.floatsCur < len(c.series.floats) { + case c.floatsCur < len(c.series.floats): c.curValType = chunkenc.ValFloat - } else if c.histogramsCur < len(c.series.histograms) { + case c.histogramsCur < len(c.series.histograms): c.curValType = getHistogramValType(&c.series.histograms[c.histogramsCur]) } - return c.curValType } @@ -514,26 +514,25 @@ func (c *concreteSeriesIterator) Next() chunkenc.ValueType { peekHistTS = c.series.histograms[c.histogramsCur+1].Timestamp } c.curValType = chunkenc.ValNone - - if peekFloatTS < peekHistTS { + switch { + case peekFloatTS < peekHistTS: c.floatsCur++ c.curValType = chunkenc.ValFloat - } else if peekHistTS < peekFloatTS { + case peekHistTS < peekFloatTS: c.histogramsCur++ c.curValType = chunkenc.ValHistogram - } else if peekFloatTS == noTS && peekHistTS == noTS { + case peekFloatTS == noTS && peekHistTS == noTS: // This only happens when the iterator is exhausted; we set the cursors off the end to prevent // Seek() from returning anything afterwards. c.floatsCur = len(c.series.floats) c.histogramsCur = len(c.series.histograms) - } else { + default: // Prefer float samples to histogram samples if there's a conflict. We advance the cursor for histograms // anyway otherwise the histogram sample will get selected on the next call to Next(). 
c.floatsCur++ c.histogramsCur++ c.curValType = chunkenc.ValFloat } - return c.curValType } diff --git a/storage/remote/queue_manager.go b/storage/remote/queue_manager.go index 62bd17a66d..0fe6d0698b 100644 --- a/storage/remote/queue_manager.go +++ b/storage/remote/queue_manager.go @@ -609,7 +609,7 @@ outer: t.metrics.enqueueRetriesTotal.Inc() time.Sleep(time.Duration(backoff)) - backoff = backoff * 2 + backoff *= 2 // It is reasonable to use t.cfg.MaxBackoff here, as if we have hit // the full backoff we are likely waiting for external resources. if backoff > t.cfg.MaxBackoff { @@ -660,7 +660,7 @@ outer: t.metrics.enqueueRetriesTotal.Inc() time.Sleep(time.Duration(backoff)) - backoff = backoff * 2 + backoff *= 2 if backoff > t.cfg.MaxBackoff { backoff = t.cfg.MaxBackoff } @@ -707,7 +707,7 @@ outer: t.metrics.enqueueRetriesTotal.Inc() time.Sleep(time.Duration(backoff)) - backoff = backoff * 2 + backoff *= 2 if backoff > t.cfg.MaxBackoff { backoff = t.cfg.MaxBackoff } @@ -754,7 +754,7 @@ outer: t.metrics.enqueueRetriesTotal.Inc() time.Sleep(time.Duration(backoff)) - backoff = backoff * 2 + backoff *= 2 if backoff > t.cfg.MaxBackoff { backoff = t.cfg.MaxBackoff } diff --git a/template/template.go b/template/template.go index d61a880a22..01f6ec9a8a 100644 --- a/template/template.go +++ b/template/template.go @@ -421,7 +421,7 @@ func (te Expander) ExpandHTML(templateFiles []string) (result string, resultErr } } }() - + //nolint:unconvert // Before Go 1.19 conversion from text_template to html_template is mandatory tmpl := html_template.New(te.name).Funcs(html_template.FuncMap(te.funcMap)) tmpl.Option(te.options...) 
tmpl.Funcs(html_template.FuncMap{ diff --git a/tsdb/agent/db_test.go b/tsdb/agent/db_test.go index f654fdb90e..e284e1b77e 100644 --- a/tsdb/agent/db_test.go +++ b/tsdb/agent/db_test.go @@ -739,8 +739,7 @@ func TestStorage_DuplicateExemplarsIgnored(t *testing.T) { var dec record.Decoder for r.Next() { rec := r.Record() - switch dec.Type(rec) { - case record.Exemplars: + if dec.Type(rec) == record.Exemplars { var exemplars []record.RefExemplar exemplars, err = dec.Exemplars(rec, exemplars) require.NoError(t, err) diff --git a/tsdb/block_test.go b/tsdb/block_test.go index 49a997fc56..e9dc1a9d00 100644 --- a/tsdb/block_test.go +++ b/tsdb/block_test.go @@ -630,7 +630,7 @@ func genHistogramSeries(totalSeries, labelCount int, mint, maxt, step int64, flo {Offset: 0, Length: 2}, {Offset: 1, Length: 2}, }, - PositiveBuckets: []int64{int64(ts + 1), 1, -1, 0}, + PositiveBuckets: []int64{ts + 1, 1, -1, 0}, } if ts != mint { // By setting the counter reset hint to "no counter @@ -669,7 +669,7 @@ func genHistogramAndFloatSeries(totalSeries, labelCount int, mint, maxt, step in {Offset: 0, Length: 2}, {Offset: 1, Length: 2}, }, - PositiveBuckets: []int64{int64(ts + 1), 1, -1, 0}, + PositiveBuckets: []int64{ts + 1, 1, -1, 0}, } if count > 1 && count%5 != 1 { // Same rationale for this as above in diff --git a/tsdb/chunkenc/bstream.go b/tsdb/chunkenc/bstream.go index 60531023ba..7b17f4686b 100644 --- a/tsdb/chunkenc/bstream.go +++ b/tsdb/chunkenc/bstream.go @@ -182,7 +182,7 @@ func (b *bstreamReader) readBits(nbits uint8) (uint64, error) { } bitmask = (uint64(1) << nbits) - 1 - v = v | ((b.buffer >> (b.valid - nbits)) & bitmask) + v |= ((b.buffer >> (b.valid - nbits)) & bitmask) b.valid -= nbits return v, nil @@ -242,13 +242,13 @@ func (b *bstreamReader) loadNextBuffer(nbits uint8) bool { if b.streamOffset+nbytes == len(b.stream) { // There can be concurrent writes happening on the very last byte // of the stream, so use the copy we took at initialization time. 
- buffer = buffer | uint64(b.last) + buffer |= uint64(b.last) // Read up to the byte before skip = 1 } for i := 0; i < nbytes-skip; i++ { - buffer = buffer | (uint64(b.stream[b.streamOffset+i]) << uint(8*(nbytes-i-1))) + buffer |= (uint64(b.stream[b.streamOffset+i]) << uint(8*(nbytes-i-1))) } b.buffer = buffer diff --git a/tsdb/chunkenc/float_histogram.go b/tsdb/chunkenc/float_histogram.go index b462c6d9fd..6dd08a31c6 100644 --- a/tsdb/chunkenc/float_histogram.go +++ b/tsdb/chunkenc/float_histogram.go @@ -785,7 +785,7 @@ func (it *floatHistogramIterator) Next() ValueType { it.err = err return ValNone } - it.tDelta = it.tDelta + tDod + it.tDelta += tDod it.t += it.tDelta if ok := it.readXor(&it.cnt.value, &it.cnt.leading, &it.cnt.trailing); !ok { diff --git a/tsdb/chunkenc/histogram.go b/tsdb/chunkenc/histogram.go index 7b6a9cacb3..866fae36fd 100644 --- a/tsdb/chunkenc/histogram.go +++ b/tsdb/chunkenc/histogram.go @@ -875,7 +875,7 @@ func (it *histogramIterator) Next() ValueType { it.err = err return ValNone } - it.tDelta = it.tDelta + tDod + it.tDelta += tDod it.t += it.tDelta cntDod, err := readVarbitInt(&it.br) @@ -883,7 +883,7 @@ func (it *histogramIterator) Next() ValueType { it.err = err return ValNone } - it.cntDelta = it.cntDelta + cntDod + it.cntDelta += cntDod it.cnt = uint64(int64(it.cnt) + it.cntDelta) zcntDod, err := readVarbitInt(&it.br) @@ -891,7 +891,7 @@ func (it *histogramIterator) Next() ValueType { it.err = err return ValNone } - it.zCntDelta = it.zCntDelta + zcntDod + it.zCntDelta += zcntDod it.zCnt = uint64(int64(it.zCnt) + it.zCntDelta) ok := it.readSum() diff --git a/tsdb/chunkenc/varbit.go b/tsdb/chunkenc/varbit.go index b3b14cf417..449f9fbac2 100644 --- a/tsdb/chunkenc/varbit.go +++ b/tsdb/chunkenc/varbit.go @@ -122,7 +122,7 @@ func readVarbitInt(b *bstreamReader) (int64, error) { } if bits > (1 << (sz - 1)) { // Or something. 
- bits = bits - (1 << sz) + bits -= (1 << sz) } val = int64(bits) } diff --git a/tsdb/chunkenc/xor.go b/tsdb/chunkenc/xor.go index 2fa2f613cb..8ca04502a7 100644 --- a/tsdb/chunkenc/xor.go +++ b/tsdb/chunkenc/xor.go @@ -163,15 +163,15 @@ func (a *xorAppender) AppendFloatHistogram(t int64, h *histogram.FloatHistogram) func (a *xorAppender) Append(t int64, v float64) { var tDelta uint64 num := binary.BigEndian.Uint16(a.b.bytes()) - - if num == 0 { + switch { + case num == 0: buf := make([]byte, binary.MaxVarintLen64) for _, b := range buf[:binary.PutVarint(buf, t)] { a.b.writeByte(b) } a.b.writeBits(math.Float64bits(v), 64) - } else if num == 1 { + case num == 1: tDelta = uint64(t - a.t) buf := make([]byte, binary.MaxVarintLen64) @@ -181,7 +181,7 @@ func (a *xorAppender) Append(t int64, v float64) { a.writeVDelta(v) - } else { + default: tDelta = uint64(t - a.t) dod := int64(tDelta - a.tDelta) @@ -321,7 +321,7 @@ func (it *xorIterator) Next() ValueType { return ValNone } it.tDelta = tDelta - it.t = it.t + int64(it.tDelta) + it.t += int64(it.tDelta) return it.readValue() } @@ -384,7 +384,7 @@ func (it *xorIterator) Next() ValueType { } it.tDelta = uint64(int64(it.tDelta) + dod) - it.t = it.t + int64(it.tDelta) + it.t += int64(it.tDelta) return it.readValue() } diff --git a/tsdb/chunks/head_chunks_test.go b/tsdb/chunks/head_chunks_test.go index ac89ae3e59..20a4c2064b 100644 --- a/tsdb/chunks/head_chunks_test.go +++ b/tsdb/chunks/head_chunks_test.go @@ -503,10 +503,10 @@ func createChunkDiskMapper(t *testing.T, dir string) *ChunkDiskMapper { func randomChunk(t *testing.T) chunkenc.Chunk { chunk := chunkenc.NewXORChunk() - len := rand.Int() % 120 + length := rand.Int() % 120 app, err := chunk.Appender() require.NoError(t, err) - for i := 0; i < len; i++ { + for i := 0; i < length; i++ { app.Append(rand.Int63(), rand.Float64()) } return chunk diff --git a/tsdb/compact.go b/tsdb/compact.go index b2d4123754..7c061b0bbd 100644 --- a/tsdb/compact.go +++ b/tsdb/compact.go @@ 
-44,7 +44,7 @@ func ExponentialBlockRanges(minSize int64, steps, stepSize int) []int64 { curRange := minSize for i := 0; i < steps; i++ { ranges = append(ranges, curRange) - curRange = curRange * int64(stepSize) + curRange *= int64(stepSize) } return ranges diff --git a/tsdb/compact_test.go b/tsdb/compact_test.go index 6a7e6ea68b..5a9eadedad 100644 --- a/tsdb/compact_test.go +++ b/tsdb/compact_test.go @@ -1452,12 +1452,6 @@ func TestSparseHistogramSpaceSavings(t *testing.T) { {100, 15, 3, 5}, {100, 50, 3, 3}, {100, 100, 3, 2}, - //{1000, 15, 1, 0}, - //{1000, 50, 1, 0}, - //{1000, 100, 1, 0}, - //{1000, 15, 3, 5}, - //{1000, 50, 3, 3}, - //{1000, 100, 3, 2}, } type testSummary struct { diff --git a/tsdb/db.go b/tsdb/db.go index 659251c3ca..a10f07b1e1 100644 --- a/tsdb/db.go +++ b/tsdb/db.go @@ -260,7 +260,7 @@ func newDBMetrics(db *DB, r prometheus.Registerer) *dbMetrics { Help: "Size of symbol table in memory for loaded blocks", }, func() float64 { db.mtx.RLock() - blocks := db.blocks[:] + blocks := db.blocks db.mtx.RUnlock() symTblSize := uint64(0) for _, b := range blocks { @@ -1186,7 +1186,7 @@ func (db *DB) compactOOO(dest string, oooHead *OOOCompactionHead) (_ []ulid.ULID } }() - for t := blockSize * (oooHeadMint / blockSize); t <= oooHeadMaxt; t = t + blockSize { + for t := blockSize * (oooHeadMint / blockSize); t <= oooHeadMaxt; t += blockSize { mint, maxt := t, t+blockSize // Block intervals are half-open: [b.MinTime, b.MaxTime). Block intervals are always +1 than the total samples it includes. uid, err := db.compactor.Write(dest, oooHead.CloneForTimeRange(mint, maxt-1), mint, maxt, nil) @@ -1508,7 +1508,7 @@ func BeyondSizeRetention(db *DB, blocks []*Block) (deletable map[ulid.ULID]struc blocksSize := db.Head().Size() for i, block := range blocks { blocksSize += block.Size() - if blocksSize > int64(db.opts.MaxBytes) { + if blocksSize > db.opts.MaxBytes { // Add this and all following blocks for deletion. 
for _, b := range blocks[i:] { deletable[b.meta.ULID] = struct{}{} diff --git a/tsdb/db_test.go b/tsdb/db_test.go index c54fccf6f1..7e1f89a95a 100644 --- a/tsdb/db_test.go +++ b/tsdb/db_test.go @@ -1076,7 +1076,7 @@ func TestWALSegmentSizeOptions(t *testing.T) { dbDir := db.Dir() require.NoError(t, db.Close()) - testFunc(dbDir, int(opts.WALSegmentSize)) + testFunc(dbDir, opts.WALSegmentSize) }) } } @@ -2996,7 +2996,7 @@ func TestCompactHead(t *testing.T) { series = seriesSet.At().Iterator(series) for series.Next() == chunkenc.ValFloat { time, val := series.At() - actSamples = append(actSamples, sample{int64(time), val, nil, nil}) + actSamples = append(actSamples, sample{time, val, nil, nil}) } require.NoError(t, series.Err()) } diff --git a/tsdb/exemplar.go b/tsdb/exemplar.go index 5ba3567e41..ad3b2ef39b 100644 --- a/tsdb/exemplar.go +++ b/tsdb/exemplar.go @@ -115,17 +115,17 @@ func NewExemplarMetrics(reg prometheus.Registerer) *ExemplarMetrics { // 1GB of extra memory, accounting for the fact that this is heap allocated space. // If len <= 0, then the exemplar storage is essentially a noop storage but can later be // resized to store exemplars. 
-func NewCircularExemplarStorage(len int64, m *ExemplarMetrics) (ExemplarStorage, error) { - if len < 0 { - len = 0 +func NewCircularExemplarStorage(length int64, m *ExemplarMetrics) (ExemplarStorage, error) { + if length < 0 { + length = 0 } c := &CircularExemplarStorage{ - exemplars: make([]*circularBufferEntry, len), - index: make(map[string]*indexEntry, len/estimatedExemplarsPerSeries), + exemplars: make([]*circularBufferEntry, length), + index: make(map[string]*indexEntry, length/estimatedExemplarsPerSeries), metrics: m, } - c.metrics.maxExemplars.Set(float64(len)) + c.metrics.maxExemplars.Set(float64(length)) return c, nil } @@ -151,7 +151,7 @@ func (ce *CircularExemplarStorage) Querier(_ context.Context) (storage.ExemplarQ func (ce *CircularExemplarStorage) Select(start, end int64, matchers ...[]*labels.Matcher) ([]exemplar.QueryResult, error) { ret := make([]exemplar.QueryResult, 0) - if len(ce.exemplars) <= 0 { + if len(ce.exemplars) == 0 { return ret, nil } @@ -219,7 +219,7 @@ func (ce *CircularExemplarStorage) ValidateExemplar(l labels.Labels, e exemplar. // Not thread safe. The append parameters tells us whether this is an external validation, or internal // as a result of an AddExemplar call, in which case we should update any relevant metrics. 
func (ce *CircularExemplarStorage) validateExemplar(key []byte, e exemplar.Exemplar, append bool) error { - if len(ce.exemplars) <= 0 { + if len(ce.exemplars) == 0 { return storage.ErrExemplarsDisabled } @@ -334,7 +334,7 @@ func (ce *CircularExemplarStorage) migrate(entry *circularBufferEntry) { } func (ce *CircularExemplarStorage) AddExemplar(l labels.Labels, e exemplar.Exemplar) error { - if len(ce.exemplars) <= 0 { + if len(ce.exemplars) == 0 { return storage.ErrExemplarsDisabled } diff --git a/tsdb/head.go b/tsdb/head.go index 4696884f21..ca953b1755 100644 --- a/tsdb/head.go +++ b/tsdb/head.go @@ -1453,7 +1453,7 @@ func (h *Head) Delete(mint, maxt int64, ms ...*labels.Matcher) error { } } for _, s := range stones { - h.tombstones.AddInterval(storage.SeriesRef(s.Ref), s.Intervals[0]) + h.tombstones.AddInterval(s.Ref, s.Intervals[0]) } return nil diff --git a/tsdb/head_test.go b/tsdb/head_test.go index e80c197b23..9326ddbe1c 100644 --- a/tsdb/head_test.go +++ b/tsdb/head_test.go @@ -3005,7 +3005,7 @@ func TestHistogramInWALAndMmapChunk(t *testing.T) { hists = tsdbutil.GenerateTestHistograms(numHistograms) } for _, h := range hists { - h.Count = h.Count * 2 + h.Count *= 2 h.NegativeSpans = h.PositiveSpans h.NegativeBuckets = h.PositiveBuckets _, err := app.AppendHistogram(0, s1, ts, h, nil) @@ -3028,7 +3028,7 @@ func TestHistogramInWALAndMmapChunk(t *testing.T) { hists = tsdbutil.GenerateTestFloatHistograms(numHistograms) } for _, h := range hists { - h.Count = h.Count * 2 + h.Count *= 2 h.NegativeSpans = h.PositiveSpans h.NegativeBuckets = h.PositiveBuckets _, err := app.AppendHistogram(0, s1, ts, nil, h) @@ -3069,26 +3069,26 @@ func TestHistogramInWALAndMmapChunk(t *testing.T) { } for _, h := range hists { ts++ - h.Count = h.Count * 2 + h.Count *= 2 h.NegativeSpans = h.PositiveSpans h.NegativeBuckets = h.PositiveBuckets - _, err := app.AppendHistogram(0, s2, int64(ts), h, nil) + _, err := app.AppendHistogram(0, s2, ts, h, nil) require.NoError(t, err) eh := 
h.Copy() if !gauge && ts > 30 && (ts-10)%20 == 1 { // Need "unknown" hint after float sample. eh.CounterResetHint = histogram.UnknownCounterReset } - exp[k2] = append(exp[k2], sample{t: int64(ts), h: eh}) + exp[k2] = append(exp[k2], sample{t: ts, h: eh}) if ts%20 == 0 { require.NoError(t, app.Commit()) app = head.Appender(context.Background()) // Add some float. for i := 0; i < 10; i++ { ts++ - _, err := app.Append(0, s2, int64(ts), float64(ts)) + _, err := app.Append(0, s2, ts, float64(ts)) require.NoError(t, err) - exp[k2] = append(exp[k2], sample{t: int64(ts), f: float64(ts)}) + exp[k2] = append(exp[k2], sample{t: ts, f: float64(ts)}) } require.NoError(t, app.Commit()) app = head.Appender(context.Background()) @@ -3106,26 +3106,26 @@ func TestHistogramInWALAndMmapChunk(t *testing.T) { } for _, h := range hists { ts++ - h.Count = h.Count * 2 + h.Count *= 2 h.NegativeSpans = h.PositiveSpans h.NegativeBuckets = h.PositiveBuckets - _, err := app.AppendHistogram(0, s2, int64(ts), nil, h) + _, err := app.AppendHistogram(0, s2, ts, nil, h) require.NoError(t, err) eh := h.Copy() if !gauge && ts > 30 && (ts-10)%20 == 1 { // Need "unknown" hint after float sample. eh.CounterResetHint = histogram.UnknownCounterReset } - exp[k2] = append(exp[k2], sample{t: int64(ts), fh: eh}) + exp[k2] = append(exp[k2], sample{t: ts, fh: eh}) if ts%20 == 0 { require.NoError(t, app.Commit()) app = head.Appender(context.Background()) // Add some float. 
for i := 0; i < 10; i++ { ts++ - _, err := app.Append(0, s2, int64(ts), float64(ts)) + _, err := app.Append(0, s2, ts, float64(ts)) require.NoError(t, err) - exp[k2] = append(exp[k2], sample{t: int64(ts), f: float64(ts)}) + exp[k2] = append(exp[k2], sample{t: ts, f: float64(ts)}) } require.NoError(t, app.Commit()) app = head.Appender(context.Background()) @@ -4495,11 +4495,12 @@ func TestHistogramValidation(t *testing.T) { } err = ValidateFloatHistogram(tc.h.ToFloat()) - if tc.errMsgFloat != "" { + switch { + case tc.errMsgFloat != "": require.ErrorContains(t, err, tc.errMsgFloat) - } else if tc.errMsg != "" { + case tc.errMsg != "": require.ErrorContains(t, err, tc.errMsg) - } else { + default: require.NoError(t, err) } }) diff --git a/tsdb/head_wal.go b/tsdb/head_wal.go index 6a8a30d5ad..b3537d060a 100644 --- a/tsdb/head_wal.go +++ b/tsdb/head_wal.go @@ -299,7 +299,7 @@ Outer: unknownRefs.Inc() continue } - h.tombstones.AddInterval(storage.SeriesRef(s.Ref), itv) + h.tombstones.AddInterval(s.Ref, itv) } } tstonesPool.Put(v) @@ -382,7 +382,7 @@ Outer: floatHistogramsPool.Put(v) case []record.RefMetadata: for _, m := range v { - s := h.series.getByID(chunks.HeadSeriesRef(m.Ref)) + s := h.series.getByID(m.Ref) if s == nil { unknownMetadataRefs.Inc() continue diff --git a/tsdb/index/index.go b/tsdb/index/index.go index 9f584ee821..50a701d3a6 100644 --- a/tsdb/index/index.go +++ b/tsdb/index/index.go @@ -536,7 +536,7 @@ func (w *Writer) finishSymbols() error { // Write out the length and symbol count. 
w.buf1.Reset() w.buf1.PutBE32int(int(symbolTableSize)) - w.buf1.PutBE32int(int(w.numSymbols)) + w.buf1.PutBE32int(w.numSymbols) if err := w.writeAt(w.buf1.Get(), w.toc.Symbols); err != nil { return err } diff --git a/tsdb/index/postings.go b/tsdb/index/postings.go index b55d70df03..c57c085ec4 100644 --- a/tsdb/index/postings.go +++ b/tsdb/index/postings.go @@ -561,10 +561,8 @@ func newMergedPostings(p []Postings) (m *mergedPostings, nonEmpty bool) { // NOTE: mergedPostings struct requires the user to issue an initial Next. if it.Next() { ph = append(ph, it) - } else { - if it.Err() != nil { - return &mergedPostings{err: it.Err()}, true - } + } else if it.Err() != nil { + return &mergedPostings{err: it.Err()}, true } } @@ -699,15 +697,16 @@ func (rp *removedPostings) Next() bool { } fcur, rcur := rp.full.At(), rp.remove.At() - if fcur < rcur { + switch { + case fcur < rcur: rp.cur = fcur rp.fok = rp.full.Next() return true - } else if rcur < fcur { + case rcur < fcur: // Forward the remove postings to the right position. rp.rok = rp.remove.Seek(fcur) - } else { + default: // Skip the current posting. rp.fok = rp.full.Next() } diff --git a/tsdb/index/postingsstats.go b/tsdb/index/postingsstats.go index 5e5880720a..6b29bddabf 100644 --- a/tsdb/index/postingsstats.go +++ b/tsdb/index/postingsstats.go @@ -31,10 +31,10 @@ type maxHeap struct { Items []Stat } -func (m *maxHeap) init(len int) { - m.maxLength = len +func (m *maxHeap) init(length int) { + m.maxLength = length m.minValue = math.MaxUint64 - m.Items = make([]Stat, 0, len) + m.Items = make([]Stat, 0, length) } func (m *maxHeap) push(item Stat) { diff --git a/tsdb/isolation.go b/tsdb/isolation.go index 74d63c6af0..401e5885a0 100644 --- a/tsdb/isolation.go +++ b/tsdb/isolation.go @@ -254,7 +254,7 @@ func (txr *txRing) add(appendID uint64) { if txr.txIDCount == len(txr.txIDs) { // Ring buffer is full, expand by doubling. 
newRing := make([]uint64, txr.txIDCount*2) - idx := copy(newRing[:], txr.txIDs[txr.txIDFirst:]) + idx := copy(newRing, txr.txIDs[txr.txIDFirst:]) copy(newRing[idx:], txr.txIDs[:txr.txIDFirst]) txr.txIDs = newRing txr.txIDFirst = 0 diff --git a/tsdb/querier.go b/tsdb/querier.go index 4b3144c71d..8806c7e735 100644 --- a/tsdb/querier.go +++ b/tsdb/querier.go @@ -239,18 +239,20 @@ func PostingsForMatchers(ix IndexReader, ms ...*labels.Matcher) (index.Postings, } for _, m := range ms { - if m.Name == "" && m.Value == "" { // Special-case for AllPostings, used in tests at least. + switch { + case m.Name == "" && m.Value == "": // Special-case for AllPostings, used in tests at least. k, v := index.AllPostingsKey() allPostings, err := ix.Postings(k, v) if err != nil { return nil, err } its = append(its, allPostings) - } else if labelMustBeSet[m.Name] { + case labelMustBeSet[m.Name]: // If this matcher must be non-empty, we can be smarter. matchesEmpty := m.Matches("") isNot := m.Type == labels.MatchNotEqual || m.Type == labels.MatchNotRegexp - if isNot && matchesEmpty { // l!="foo" + switch { + case isNot && matchesEmpty: // l!="foo" // If the label can't be empty and is a Not and the inner matcher // doesn't match empty, then subtract it out at the end. inverse, err := m.Inverse() @@ -263,7 +265,7 @@ func PostingsForMatchers(ix IndexReader, ms ...*labels.Matcher) (index.Postings, return nil, err } notIts = append(notIts, it) - } else if isNot && !matchesEmpty { // l!="" + case isNot && !matchesEmpty: // l!="" // If the label can't be empty and is a Not, but the inner matcher can // be empty we need to use inversePostingsForMatcher. inverse, err := m.Inverse() @@ -279,7 +281,7 @@ func PostingsForMatchers(ix IndexReader, ms ...*labels.Matcher) (index.Postings, return index.EmptyPostings(), nil } its = append(its, it) - } else { // l="a" + default: // l="a" // Non-Not matcher, use normal postingsForMatcher. 
it, err := postingsForMatcher(ix, m) if err != nil { @@ -290,7 +292,7 @@ func PostingsForMatchers(ix IndexReader, ms ...*labels.Matcher) (index.Postings, } its = append(its, it) } - } else { // l="" + default: // l="" // If the matchers for a labelname selects an empty value, it selects all // the series which don't have the label name set too. See: // https://github.com/prometheus/prometheus/issues/3575 and @@ -965,24 +967,24 @@ func (m *mergedStringIter) Next() bool { if (!m.aok && !m.bok) || (m.Err() != nil) { return false } - - if !m.aok { + switch { + case !m.aok: m.cur = m.b.At() m.bok = m.b.Next() m.err = m.b.Err() - } else if !m.bok { + case !m.bok: m.cur = m.a.At() m.aok = m.a.Next() m.err = m.a.Err() - } else if m.b.At() > m.a.At() { + case m.b.At() > m.a.At(): m.cur = m.a.At() m.aok = m.a.Next() m.err = m.a.Err() - } else if m.a.At() > m.b.At() { + case m.a.At() > m.b.At(): m.cur = m.b.At() m.bok = m.b.Next() m.err = m.b.Err() - } else { // Equal. + default: // Equal. m.cur = m.b.At() m.aok = m.a.Next() m.err = m.a.Err() diff --git a/tsdb/querier_test.go b/tsdb/querier_test.go index fa3dd2418f..8f52fff28a 100644 --- a/tsdb/querier_test.go +++ b/tsdb/querier_test.go @@ -113,7 +113,7 @@ func createIdxChkReaders(t *testing.T, tc []seriesSamples) (IndexReader, ChunkRe var chunkRef chunks.ChunkRef for i, s := range tc { - i = i + 1 // 0 is not a valid posting. + i++ // 0 is not a valid posting. 
metas := make([]chunks.Meta, 0, len(s.chunks)) for _, chk := range s.chunks { if chk[0].t < blockMint { @@ -2012,7 +2012,7 @@ func BenchmarkQueries(b *testing.B) { for x := 0; x <= 10; x++ { block, err := OpenBlock(nil, createBlock(b, dir, series), nil) require.NoError(b, err) - q, err := NewBlockQuerier(block, 1, int64(nSamples)) + q, err := NewBlockQuerier(block, 1, nSamples) require.NoError(b, err) qs = append(qs, q) } diff --git a/tsdb/wal.go b/tsdb/wal.go index e0bc1ec699..a9af76d150 100644 --- a/tsdb/wal.go +++ b/tsdb/wal.go @@ -90,7 +90,7 @@ func newWalMetrics(r prometheus.Registerer) *walMetrics { // WAL is a write ahead log that can log new series labels and samples. // It must be completely read before new entries are logged. // -// DEPRECATED: use wlog pkg combined with the record codex instead. +// Deprecated: use wlog pkg combined with the record codex instead. type WAL interface { Reader() WALReader LogSeries([]record.RefSeries) error @@ -147,7 +147,7 @@ func newCRC32() hash.Hash32 { // SegmentWAL is a write ahead log for series data. // -// DEPRECATED: use wlog pkg combined with the record coders instead. +// Deprecated: use wlog pkg combined with the record coders instead. 
type SegmentWAL struct { mtx sync.Mutex metrics *walMetrics diff --git a/tsdb/wlog/wlog_test.go b/tsdb/wlog/wlog_test.go index ed8a9df2e3..7f9133a76d 100644 --- a/tsdb/wlog/wlog_test.go +++ b/tsdb/wlog/wlog_test.go @@ -428,10 +428,10 @@ func TestLogPartialWrite(t *testing.T) { faultyRecord: pageSize / (recordHeaderSize + len(record)), }, // TODO the current implementation suffers this: - //"partial write when logging a record overlapping two pages": { + // "partial write when logging a record overlapping two pages": { // numRecords: (pageSize / (recordHeaderSize + len(record))) + 10, // faultyRecord: pageSize/(recordHeaderSize+len(record)) + 1, - //}, + // }, } for testName, testData := range tests { diff --git a/util/runtime/limits_default.go b/util/runtime/limits_default.go index c3e0b4701a..1588a93c79 100644 --- a/util/runtime/limits_default.go +++ b/util/runtime/limits_default.go @@ -39,7 +39,7 @@ func getLimits(resource int, unit string) string { if err != nil { panic("syscall.Getrlimit failed: " + err.Error()) } - return fmt.Sprintf("(soft=%s, hard=%s)", limitToString(uint64(rlimit.Cur), unit), limitToString(uint64(rlimit.Max), unit)) + return fmt.Sprintf("(soft=%s, hard=%s)", limitToString(rlimit.Cur, unit), limitToString(rlimit.Max, unit)) } // FdLimits returns the soft and hard limits for file descriptors. 
diff --git a/util/runtime/statfs_default.go b/util/runtime/statfs_default.go index f850f2cd6d..2e31d93fca 100644 --- a/util/runtime/statfs_default.go +++ b/util/runtime/statfs_default.go @@ -72,11 +72,13 @@ func Statfs(path string) string { var fs syscall.Statfs_t err := syscall.Statfs(path, &fs) + //nolint:unconvert // This ensure Type format on all Platforms + localType := int64(fs.Type) if err != nil { - return strconv.FormatInt(int64(fs.Type), 16) + return strconv.FormatInt(localType, 16) } - if fsType, ok := fsTypes[int64(fs.Type)]; ok { + if fsType, ok := fsTypes[localType]; ok { return fsType } - return strconv.FormatInt(int64(fs.Type), 16) + return strconv.FormatInt(localType, 16) } diff --git a/web/api/v1/api.go b/web/api/v1/api.go index aeea87ca70..2e0016fd25 100644 --- a/web/api/v1/api.go +++ b/web/api/v1/api.go @@ -243,7 +243,7 @@ func NewAPI( remoteReadConcurrencyLimit int, remoteReadMaxBytesInFrame int, isAgent bool, - CORSOrigin *regexp.Regexp, + corsOrigin *regexp.Regexp, runtimeInfo func() (RuntimeInfo, error), buildInfo *PrometheusVersion, gatherer prometheus.Gatherer, @@ -269,7 +269,7 @@ func NewAPI( enableAdmin: enableAdmin, rulesRetriever: rr, logger: logger, - CORSOrigin: CORSOrigin, + CORSOrigin: corsOrigin, runtimeInfo: runtimeInfo, buildInfo: buildInfo, gatherer: gatherer, diff --git a/web/api/v1/api_test.go b/web/api/v1/api_test.go index efce042214..27cbab1b33 100644 --- a/web/api/v1/api_test.go +++ b/web/api/v1/api_test.go @@ -2791,7 +2791,7 @@ func TestRespondSuccess(t *testing.T) { } var res response - if err = json.Unmarshal([]byte(body), &res); err != nil { + if err = json.Unmarshal(body, &res); err != nil { t.Fatalf("Error unmarshaling JSON body: %s", err) } @@ -2827,7 +2827,7 @@ func TestRespondError(t *testing.T) { } var res response - if err = json.Unmarshal([]byte(body), &res); err != nil { + if err = json.Unmarshal(body, &res); err != nil { t.Fatalf("Error unmarshaling JSON body: %s", err) } diff --git a/web/web.go b/web/web.go 
index 9d63094f69..f4f64163df 100644 --- a/web/web.go +++ b/web/web.go @@ -719,9 +719,9 @@ func (h *Handler) runtimeInfo() (api_v1.RuntimeInfo, error) { } if h.options.TSDBMaxBytes != 0 { if status.StorageRetention != "" { - status.StorageRetention = status.StorageRetention + " or " + status.StorageRetention += " or " } - status.StorageRetention = status.StorageRetention + h.options.TSDBMaxBytes.String() + status.StorageRetention += h.options.TSDBMaxBytes.String() } metrics, err := h.gatherer.Gather() From 5d4ec08a1fd65ff84ea16bca22ad3b20b91604f6 Mon Sep 17 00:00:00 2001 From: Sebastian Rabenhorst Date: Fri, 14 Apr 2023 11:59:30 +0200 Subject: [PATCH 04/26] Fixed sampleRingIterator for mixed histograms Signed-off-by: Sebastian Rabenhorst Fixed sampleRingIterator for mixed histograms Signed-off-by: Sebastian Rabenhorst Fixed lint --- storage/buffer.go | 2 ++ storage/buffer_test.go | 23 +++++++++++++++++++++++ 2 files changed, 25 insertions(+) diff --git a/storage/buffer.go b/storage/buffer.go index 27ac21661b..2229e52591 100644 --- a/storage/buffer.go +++ b/storage/buffer.go @@ -332,9 +332,11 @@ func (it *sampleRingIterator) Next() chunkenc.ValueType { switch s.Type() { case chunkenc.ValHistogram: it.h = s.H() + it.fh = nil return chunkenc.ValHistogram case chunkenc.ValFloatHistogram: it.fh = s.FH() + it.h = nil return chunkenc.ValFloatHistogram default: it.f = s.F() diff --git a/storage/buffer_test.go b/storage/buffer_test.go index ebe24d8df3..70cd7f4ff5 100644 --- a/storage/buffer_test.go +++ b/storage/buffer_test.go @@ -21,6 +21,7 @@ import ( "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/tsdb/chunkenc" + "github.com/prometheus/prometheus/tsdb/tsdbutil" ) func TestSampleRing(t *testing.T) { @@ -180,6 +181,28 @@ func TestBufferedSeriesIteratorNoBadAt(t *testing.T) { it.Next() } +func TestBufferedSeriesIteratorMixedHistograms(t *testing.T) { + histograms := tsdbutil.GenerateTestHistograms(2) + + it := 
NewBufferIterator(NewListSeriesIterator(samples{ + fhSample{t: 1, fh: histograms[0].ToFloat()}, + hSample{t: 2, h: histograms[1]}, + }), 2) + + require.Equal(t, chunkenc.ValNone, it.Seek(3)) + require.NoError(t, it.Err()) + + buf := it.Buffer() + + require.Equal(t, chunkenc.ValFloatHistogram, buf.Next()) + _, fh := buf.AtFloatHistogram() + require.Equal(t, histograms[0].ToFloat(), fh) + + require.Equal(t, chunkenc.ValHistogram, buf.Next()) + _, fh = buf.AtFloatHistogram() + require.Equal(t, histograms[1].ToFloat(), fh) +} + func BenchmarkBufferedSeriesIterator(b *testing.B) { // Simulate a 5 minute rate. it := NewBufferIterator(newFakeSeriesIterator(int64(b.N), 30), 5*60) From f3394bf7a1f362b88d89cb0784be47b75d8fa152 Mon Sep 17 00:00:00 2001 From: gotjosh Date: Tue, 18 Apr 2023 10:07:32 +0100 Subject: [PATCH 05/26] Rules API: Allow filtering by rule name Introduces support for a new query parameter in the `/rules` API endpoint that allows filtering by rule names. If all the rules of a group are filtered, we skip the group entirely. Signed-off-by: gotjosh --- docs/querying/api.md | 2 ++ web/api/v1/api.go | 33 +++++++++++++++++++++++++++------ web/api/v1/api_test.go | 27 +++++++++++++++++++++++++++ 3 files changed, 56 insertions(+), 6 deletions(-) diff --git a/docs/querying/api.md b/docs/querying/api.md index f2182a2052..0cc549b65e 100644 --- a/docs/querying/api.md +++ b/docs/querying/api.md @@ -673,7 +673,9 @@ GET /api/v1/rules ``` URL query parameters: + - `type=alert|record`: return only the alerting rules (e.g. `type=alert`) or the recording rules (e.g. `type=record`). When the parameter is absent or empty, no filtering is done. +- `rules=alertName,RuleName`: return only the alerting and recording rules with the specified names. If we've filtered out all the rules of a group, the group is not returned. When the parameter is absent or empty, no filtering is done. 
```json $ curl http://localhost:9090/api/v1/rules diff --git a/web/api/v1/api.go b/web/api/v1/api.go index aeea87ca70..d955958041 100644 --- a/web/api/v1/api.go +++ b/web/api/v1/api.go @@ -1296,6 +1296,16 @@ func (api *API) rules(r *http.Request) apiFuncResult { res := &RuleDiscovery{RuleGroups: make([]*RuleGroup, len(ruleGroups))} typ := strings.ToLower(r.URL.Query().Get("type")) + // Parse the rule names into a comma separated list of rule names, then create a set. + rulesQuery := r.URL.Query().Get("rules") + ruleNamesSet := map[string]struct{}{} + if rulesQuery != "" { + names := strings.Split(rulesQuery, ",") + for _, rn := range names { + ruleNamesSet[strings.TrimSpace(rn)] = struct{}{} + } + } + if typ != "" && typ != "alert" && typ != "record" { return invalidParamError(errors.Errorf("not supported value %q", typ), "type") } @@ -1313,14 +1323,20 @@ func (api *API) rules(r *http.Request) apiFuncResult { EvaluationTime: grp.GetEvaluationTime().Seconds(), LastEvaluation: grp.GetLastEvaluation(), } - for _, r := range grp.Rules() { + for _, rr := range grp.Rules() { var enrichedRule Rule - lastError := "" - if r.LastError() != nil { - lastError = r.LastError().Error() + if len(ruleNamesSet) > 0 { + if _, ok := ruleNamesSet[rr.Name()]; !ok { + continue + } } - switch rule := r.(type) { + + lastError := "" + if rr.LastError() != nil { + lastError = rr.LastError().Error() + } + switch rule := rr.(type) { case *rules.AlertingRule: if !returnAlerts { break @@ -1358,11 +1374,16 @@ func (api *API) rules(r *http.Request) apiFuncResult { err := errors.Errorf("failed to assert type of rule '%v'", rule.Name()) return apiFuncResult{nil, &apiError{errorInternal, err}, nil, nil} } + if enrichedRule != nil { apiRuleGroup.Rules = append(apiRuleGroup.Rules, enrichedRule) } } - res.RuleGroups[i] = apiRuleGroup + + // If the rule group response has no rules, skip it - this means we filtered all the rules of this group. 
+ if len(apiRuleGroup.Rules) > 0 { + res.RuleGroups[i] = apiRuleGroup + } } return apiFuncResult{res, nil, nil, nil} } diff --git a/web/api/v1/api_test.go b/web/api/v1/api_test.go index efce042214..e354bf298f 100644 --- a/web/api/v1/api_test.go +++ b/web/api/v1/api_test.go @@ -1973,6 +1973,33 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E }, }, }, + { + endpoint: api.rules, + query: url.Values{"rules": []string{"test_metric4"}}, + response: &RuleDiscovery{ + RuleGroups: []*RuleGroup{ + { + Name: "grp", + File: "/path/to/file", + Interval: 1, + Limit: 0, + Rules: []Rule{ + AlertingRule{ + State: "inactive", + Name: "test_metric4", + Query: "up == 1", + Duration: 1, + Labels: labels.Labels{}, + Annotations: labels.Labels{}, + Alerts: []*Alert{}, + Health: "unknown", + Type: "alerting", + }, + }, + }, + }, + }, + }, { endpoint: api.queryExemplars, query: url.Values{ From 96b6463f2587f8c3da1031bfd5bc6b9aca575733 Mon Sep 17 00:00:00 2001 From: gotjosh Date: Tue, 18 Apr 2023 16:26:21 +0100 Subject: [PATCH 06/26] review comments Signed-off-by: gotjosh --- web/api/v1/api.go | 42 ++++++++++++++++++++++++++++++------------ web/api/v1/api_test.go | 34 +++++++++++++++++++++++++++++++++- 2 files changed, 63 insertions(+), 13 deletions(-) diff --git a/web/api/v1/api.go b/web/api/v1/api.go index d955958041..9a13e09d98 100644 --- a/web/api/v1/api.go +++ b/web/api/v1/api.go @@ -1292,20 +1292,26 @@ type RecordingRule struct { } func (api *API) rules(r *http.Request) apiFuncResult { + if err := r.ParseForm(); err != nil { + return apiFuncResult{nil, &apiError{errorBadData, errors.Wrapf(err, "error parsing form values")}, nil, nil} + } + + queryFormToSet := func(values []string) map[string]struct{} { + set := make(map[string]struct{}, len(values)) + for _, v := range values { + set[v] = struct{}{} + } + return set + } + + rnSet := queryFormToSet(r.Form["rule_name[]"]) + rgSet := queryFormToSet(r.Form["rule_group[]"]) + fSet := 
queryFormToSet(r.Form["file[]"]) + ruleGroups := api.rulesRetriever(r.Context()).RuleGroups() res := &RuleDiscovery{RuleGroups: make([]*RuleGroup, len(ruleGroups))} typ := strings.ToLower(r.URL.Query().Get("type")) - // Parse the rule names into a comma separated list of rule names, then create a set. - rulesQuery := r.URL.Query().Get("rules") - ruleNamesSet := map[string]struct{}{} - if rulesQuery != "" { - names := strings.Split(rulesQuery, ",") - for _, rn := range names { - ruleNamesSet[strings.TrimSpace(rn)] = struct{}{} - } - } - if typ != "" && typ != "alert" && typ != "record" { return invalidParamError(errors.Errorf("not supported value %q", typ), "type") } @@ -1314,6 +1320,18 @@ func (api *API) rules(r *http.Request) apiFuncResult { returnRecording := typ == "" || typ == "record" for i, grp := range ruleGroups { + if len(rgSet) > 0 { + if _, ok := rgSet[grp.Name()]; !ok { + continue + } + } + + if len(fSet) > 0 { + if _, ok := fSet[grp.File()]; !ok { + continue + } + } + apiRuleGroup := &RuleGroup{ Name: grp.Name(), File: grp.File(), @@ -1326,8 +1344,8 @@ func (api *API) rules(r *http.Request) apiFuncResult { for _, rr := range grp.Rules() { var enrichedRule Rule - if len(ruleNamesSet) > 0 { - if _, ok := ruleNamesSet[rr.Name()]; !ok { + if len(rnSet) > 0 { + if _, ok := rnSet[rr.Name()]; !ok { continue } } diff --git a/web/api/v1/api_test.go b/web/api/v1/api_test.go index e354bf298f..c3e1bf59d7 100644 --- a/web/api/v1/api_test.go +++ b/web/api/v1/api_test.go @@ -1975,7 +1975,39 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E }, { endpoint: api.rules, - query: url.Values{"rules": []string{"test_metric4"}}, + query: url.Values{"rule_name[]": []string{"test_metric4"}}, + response: &RuleDiscovery{ + RuleGroups: []*RuleGroup{ + { + Name: "grp", + File: "/path/to/file", + Interval: 1, + Limit: 0, + Rules: []Rule{ + AlertingRule{ + State: "inactive", + Name: "test_metric4", + Query: "up == 1", + Duration: 1, + Labels: 
labels.Labels{}, + Annotations: labels.Labels{}, + Alerts: []*Alert{}, + Health: "unknown", + Type: "alerting", + }, + }, + }, + }, + }, + }, + { + endpoint: api.rules, + query: url.Values{"rule_group[]": []string{"respond-with-nothing"}}, + response: &RuleDiscovery{RuleGroups: []*RuleGroup{nil}}, + }, + { + endpoint: api.rules, + query: url.Values{"file[]": []string{"/path/to/file"}, "rule_name[]": []string{"test_metric4"}}, response: &RuleDiscovery{ RuleGroups: []*RuleGroup{ { From e2a2790b2c830e903e28562d667b0a03adb3beeb Mon Sep 17 00:00:00 2001 From: gotjosh Date: Tue, 18 Apr 2023 16:50:16 +0100 Subject: [PATCH 07/26] add more docs Signed-off-by: gotjosh --- docs/querying/api.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/docs/querying/api.md b/docs/querying/api.md index 0cc549b65e..bc0587dd0c 100644 --- a/docs/querying/api.md +++ b/docs/querying/api.md @@ -675,7 +675,9 @@ GET /api/v1/rules URL query parameters: - `type=alert|record`: return only the alerting rules (e.g. `type=alert`) or the recording rules (e.g. `type=record`). When the parameter is absent or empty, no filtering is done. -- `rules=alertName,RuleName`: return only the alerting and recording rules with the specified names. If we've filtered out all the rules of a group, the group is not returned. When the parameter is absent or empty, no filtering is done. +- `rule_name[]=`: return the groups and rules of the specified alerting and recording rules names, the parameter supports repetition. If we've filtered out all the rules of a group, the group is not returned. When the parameter is absent or empty, no filtering is done. +- `rule_group[]=`: return the group and rules of the specified rule group names, the parameter supports repetitions. When the parameter is absent or empty, no filtering is done. +- `file[]=`: return the group and rules of specified filepath for rule groups, the parameter supports repetition. When the parameter is absent or empty, no filtering is done. 
```json
$ curl http://localhost:9090/api/v1/rules

From 28909a46362737c218039dbf952d13e200b31c1f Mon Sep 17 00:00:00 2001
From: gotjosh
Date: Tue, 18 Apr 2023 16:51:35 +0100
Subject: [PATCH 08/26] more wordsmithing

Signed-off-by: gotjosh
---
 docs/querying/api.md | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/docs/querying/api.md b/docs/querying/api.md
index bc0587dd0c..c6d8d2c836 100644
--- a/docs/querying/api.md
+++ b/docs/querying/api.md
@@ -675,9 +675,9 @@ GET /api/v1/rules
 URL query parameters:

 - `type=alert|record`: return only the alerting rules (e.g. `type=alert`) or the recording rules (e.g. `type=record`). When the parameter is absent or empty, no filtering is done.
-- `rule_name[]=`: return the groups and rules of the specified alerting and recording rules names, the parameter supports repetition. If we've filtered out all the rules of a group, the group is not returned. When the parameter is absent or empty, no filtering is done.
-- `rule_group[]=`: return the group and rules of the specified rule group names, the parameter supports repetitions. When the parameter is absent or empty, no filtering is done.
-- `file[]=`: return the group and rules of specified filepath for rule groups, the parameter supports repetition. When the parameter is absent or empty, no filtering is done.
+- `rule_name[]=`: return the groups and its rules of the specified alerting and recording rules names, the parameter supports repetition. If we've filtered out all the rules of a group, the group is not returned. When the parameter is absent or empty, no filtering is done.
+- `rule_group[]=`: return the groups and its rules of the specified rule group names, the parameter supports repetitions. When the parameter is absent or empty, no filtering is done.
+- `file[]=`: return the groups and its rules of specified filepath for rule groups, the parameter supports repetition. When the parameter is absent or empty, no filtering is done.
```json $ curl http://localhost:9090/api/v1/rules From cf230bcd18bbbb429a46985049b049abb3437140 Mon Sep 17 00:00:00 2001 From: gotjosh Date: Wed, 19 Apr 2023 09:49:34 +0100 Subject: [PATCH 09/26] more wordsmithing Signed-off-by: gotjosh --- docs/querying/api.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/querying/api.md b/docs/querying/api.md index c6d8d2c836..820414fb13 100644 --- a/docs/querying/api.md +++ b/docs/querying/api.md @@ -675,9 +675,9 @@ GET /api/v1/rules URL query parameters: - `type=alert|record`: return only the alerting rules (e.g. `type=alert`) or the recording rules (e.g. `type=record`). When the parameter is absent or empty, no filtering is done. -- `rule_name[]=`: return the groups and its rules of the specified alerting and recording rules names, the parameter supports repetition. If we've filtered out all the rules of a group, the group is not returned. When the parameter is absent or empty, no filtering is done. -- `rule_group[]=`: return the groups and its rules of the specified rule group names, the parameter supports repetitions. When the parameter is absent or empty, no filtering is done. -- `file[]=`: return the groups and its rules of specified filepath for rule groups, the parameter supports repetition. When the parameter is absent or empty, no filtering is done. +- `rule_name[]=`: only return rules with the given rule name. If the parameter is repeated, rules with any of provided names are returned. If we've filtered out all the rules of a group, the group is not returned. When the parameter is absent or empty, no filtering is done. +- `rule_group[]=`: only return rules with the given rule group name. If the parameter is repeated, rules with any of provided rule group names are returned. When the parameter is absent or empty, no filtering is done. +- `file[]=`: only return rules with the given filepath. If the parameter is repeated, rules with any of provided filepaths are returned. 
When the parameter is absent or empty, no filtering is done. ```json $ curl http://localhost:9090/api/v1/rules From c3c7d44d845e08969845133a8c64c631ddbbb437 Mon Sep 17 00:00:00 2001 From: beorn7 Date: Wed, 12 Apr 2023 13:05:41 +0200 Subject: [PATCH 10/26] lint: Adjust to the lint warnings raised by current versions of golint-ci MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We haven't updated golint-ci in our CI yet, but this commit prepares for that. There are a lot of new warnings, and it is mostly because the "revive" linter got updated. I agree with most of the new warnings, mostly around not naming unused function parameters (although it is justified in some cases for documentation purposes – while things like mocks are a good example where not naming the parameter is clearer). I'm pretty upset about the "empty block" warning to include `for` loops. It's such a common pattern to do something in the head of the `for` loop and then have an empty block. There is still an open issue about this: https://github.com/mgechev/revive/issues/810 I have disabled "revive" altogether in files where empty blocks are used excessively, and I have made the effort to add individual `// nolint:revive` where empty blocks are used just once or twice. It's borderline noisy, though, but let's go with it for now. I should mention that none of the "empty block" warnings for `for` loop bodies were legitimate. 
Signed-off-by: beorn7 --- cmd/prometheus/main.go | 1 + cmd/promtool/backfill_test.go | 2 +- cmd/promtool/rules.go | 2 +- cmd/promtool/rules_test.go | 4 ++-- discovery/aws/ec2.go | 2 +- discovery/hetzner/hcloud.go | 2 +- discovery/hetzner/robot.go | 4 ++-- discovery/ionos/server.go | 2 +- discovery/kubernetes/client_metrics.go | 6 +++--- discovery/kubernetes/endpoints.go | 1 + discovery/kubernetes/endpointslice.go | 2 +- discovery/kubernetes/ingress.go | 2 +- discovery/kubernetes/node.go | 2 +- discovery/kubernetes/pod.go | 2 +- discovery/kubernetes/service.go | 2 +- discovery/legacymanager/manager_test.go | 13 +++---------- discovery/manager_test.go | 17 +++++------------ discovery/nomad/nomad.go | 2 +- discovery/ovhcloud/dedicated_server.go | 2 +- discovery/ovhcloud/vps.go | 2 +- discovery/vultr/vultr.go | 4 +--- discovery/zookeeper/zookeeper.go | 2 +- model/textparse/promparse.go | 7 ++++--- promql/bench_test.go | 2 +- promql/engine.go | 2 +- promql/functions.go | 1 + promql/parser/lex.go | 3 ++- promql/parser/parse.go | 6 +++--- rules/manager.go | 7 ++++--- rules/manager_test.go | 8 ++++---- rules/origin_test.go | 22 +++++++++++----------- scrape/scrape_test.go | 4 ++-- scrape/target_test.go | 2 +- storage/buffer_test.go | 4 ++-- storage/fanout_test.go | 2 +- storage/interface.go | 6 +++--- storage/memoized_iterator_test.go | 4 ++-- storage/merge.go | 11 +++++------ storage/remote/codec_test.go | 2 +- storage/remote/queue_manager_test.go | 10 +++++----- storage/remote/write_handler_test.go | 2 +- tsdb/agent/db.go | 16 ++++++++-------- tsdb/chunkenc/float_histogram.go | 2 +- tsdb/chunkenc/float_histogram_test.go | 2 +- tsdb/chunkenc/histogram.go | 2 +- tsdb/chunkenc/histogram_test.go | 2 +- tsdb/chunkenc/xor.go | 6 +++--- tsdb/chunks/head_chunks_test.go | 4 ++-- tsdb/compact_test.go | 4 ++-- tsdb/db_test.go | 6 +++--- tsdb/exemplar.go | 12 ++++++------ tsdb/goversion/goversion_test.go | 2 +- tsdb/head_test.go | 7 ++++--- tsdb/head_wal.go | 1 + 
tsdb/ooo_head_read.go | 1 + tsdb/querier.go | 2 +- tsdb/querier_bench_test.go | 2 +- tsdb/querier_test.go | 1 + tsdb/wal.go | 1 + tsdb/wlog/reader_test.go | 2 +- tsdb/wlog/wlog_test.go | 2 +- util/logging/dedupe_test.go | 2 +- util/testutil/context.go | 2 +- util/testutil/roundtrip.go | 2 +- util/treecache/treecache.go | 2 +- web/api/v1/api.go | 14 +++++++------- web/api/v1/api_test.go | 6 +++--- web/api/v1/errors_test.go | 1 + web/web.go | 6 +++--- 69 files changed, 145 insertions(+), 150 deletions(-) diff --git a/cmd/prometheus/main.go b/cmd/prometheus/main.go index f4f6af20df..5592036466 100644 --- a/cmd/prometheus/main.go +++ b/cmd/prometheus/main.go @@ -12,6 +12,7 @@ // limitations under the License. // The main package for the Prometheus server executable. +// nolint:revive // Many unsued function arguments in this file by design. package main import ( diff --git a/cmd/promtool/backfill_test.go b/cmd/promtool/backfill_test.go index 2c551abeb3..e6f7cad31b 100644 --- a/cmd/promtool/backfill_test.go +++ b/cmd/promtool/backfill_test.go @@ -44,7 +44,7 @@ func sortSamples(samples []backfillSample) { }) } -func queryAllSeries(t testing.TB, q storage.Querier, expectedMinTime, expectedMaxTime int64) []backfillSample { +func queryAllSeries(t testing.TB, q storage.Querier, expectedMinTime, expectedMaxTime int64) []backfillSample { // nolint:revive ss := q.Select(false, nil, labels.MustNewMatcher(labels.MatchRegexp, "", ".*")) samples := []backfillSample{} for ss.Next() { diff --git a/cmd/promtool/rules.go b/cmd/promtool/rules.go index aedc7bcb9d..43b76dbe4a 100644 --- a/cmd/promtool/rules.go +++ b/cmd/promtool/rules.go @@ -68,7 +68,7 @@ func newRuleImporter(logger log.Logger, config ruleImporterConfig, apiClient que } // loadGroups parses groups from a list of recording rule files. 
-func (importer *ruleImporter) loadGroups(ctx context.Context, filenames []string) (errs []error) { +func (importer *ruleImporter) loadGroups(_ context.Context, filenames []string) (errs []error) { groups, errs := importer.ruleManager.LoadGroups(importer.config.evalInterval, labels.Labels{}, "", nil, filenames...) if errs != nil { return errs diff --git a/cmd/promtool/rules_test.go b/cmd/promtool/rules_test.go index fb582ed0dd..213b7d2a01 100644 --- a/cmd/promtool/rules_test.go +++ b/cmd/promtool/rules_test.go @@ -35,7 +35,7 @@ type mockQueryRangeAPI struct { samples model.Matrix } -func (mockAPI mockQueryRangeAPI) QueryRange(ctx context.Context, query string, r v1.Range, opts ...v1.Option) (model.Value, v1.Warnings, error) { +func (mockAPI mockQueryRangeAPI) QueryRange(_ context.Context, query string, r v1.Range, opts ...v1.Option) (model.Value, v1.Warnings, error) { // nolint:revive return mockAPI.samples, v1.Warnings{}, nil } @@ -161,7 +161,7 @@ func TestBackfillRuleIntegration(t *testing.T) { } } -func newTestRuleImporter(ctx context.Context, start time.Time, tmpDir string, testSamples model.Matrix, maxBlockDuration time.Duration) (*ruleImporter, error) { +func newTestRuleImporter(_ context.Context, start time.Time, tmpDir string, testSamples model.Matrix, maxBlockDuration time.Duration) (*ruleImporter, error) { logger := log.NewNopLogger() cfg := ruleImporterConfig{ outputDir: tmpDir, diff --git a/discovery/aws/ec2.go b/discovery/aws/ec2.go index ca9921159d..86d76627e1 100644 --- a/discovery/aws/ec2.go +++ b/discovery/aws/ec2.go @@ -164,7 +164,7 @@ func NewEC2Discovery(conf *EC2SDConfig, logger log.Logger) *EC2Discovery { return d } -func (d *EC2Discovery) ec2Client(ctx context.Context) (*ec2.EC2, error) { +func (d *EC2Discovery) ec2Client(context.Context) (*ec2.EC2, error) { if d.ec2 != nil { return d.ec2, nil } diff --git a/discovery/hetzner/hcloud.go b/discovery/hetzner/hcloud.go index aa406a1a7a..50afdc1ec3 100644 --- a/discovery/hetzner/hcloud.go +++ 
b/discovery/hetzner/hcloud.go @@ -59,7 +59,7 @@ type hcloudDiscovery struct { } // newHcloudDiscovery returns a new hcloudDiscovery which periodically refreshes its targets. -func newHcloudDiscovery(conf *SDConfig, logger log.Logger) (*hcloudDiscovery, error) { +func newHcloudDiscovery(conf *SDConfig, _ log.Logger) (*hcloudDiscovery, error) { d := &hcloudDiscovery{ port: conf.Port, } diff --git a/discovery/hetzner/robot.go b/discovery/hetzner/robot.go index 4b7abaf77f..4960880289 100644 --- a/discovery/hetzner/robot.go +++ b/discovery/hetzner/robot.go @@ -51,7 +51,7 @@ type robotDiscovery struct { } // newRobotDiscovery returns a new robotDiscovery which periodically refreshes its targets. -func newRobotDiscovery(conf *SDConfig, logger log.Logger) (*robotDiscovery, error) { +func newRobotDiscovery(conf *SDConfig, _ log.Logger) (*robotDiscovery, error) { d := &robotDiscovery{ port: conf.Port, endpoint: conf.robotEndpoint, @@ -69,7 +69,7 @@ func newRobotDiscovery(conf *SDConfig, logger log.Logger) (*robotDiscovery, erro return d, nil } -func (d *robotDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) { +func (d *robotDiscovery) refresh(context.Context) ([]*targetgroup.Group, error) { req, err := http.NewRequest("GET", d.endpoint+"/server", nil) if err != nil { return nil, err diff --git a/discovery/ionos/server.go b/discovery/ionos/server.go index 8ac3639705..a850fbbfb4 100644 --- a/discovery/ionos/server.go +++ b/discovery/ionos/server.go @@ -60,7 +60,7 @@ type serverDiscovery struct { datacenterID string } -func newServerDiscovery(conf *SDConfig, logger log.Logger) (*serverDiscovery, error) { +func newServerDiscovery(conf *SDConfig, _ log.Logger) (*serverDiscovery, error) { d := &serverDiscovery{ port: conf.Port, datacenterID: conf.DatacenterID, diff --git a/discovery/kubernetes/client_metrics.go b/discovery/kubernetes/client_metrics.go index 3a33e3e8d5..b316f7d885 100644 --- a/discovery/kubernetes/client_metrics.go +++ 
b/discovery/kubernetes/client_metrics.go @@ -122,11 +122,11 @@ func (f *clientGoRequestMetricAdapter) Register(registerer prometheus.Registerer ) } -func (clientGoRequestMetricAdapter) Increment(ctx context.Context, code, method, host string) { +func (clientGoRequestMetricAdapter) Increment(_ context.Context, code, _, _ string) { clientGoRequestResultMetricVec.WithLabelValues(code).Inc() } -func (clientGoRequestMetricAdapter) Observe(ctx context.Context, verb string, u url.URL, latency time.Duration) { +func (clientGoRequestMetricAdapter) Observe(_ context.Context, _ string, u url.URL, latency time.Duration) { clientGoRequestLatencyMetricVec.WithLabelValues(u.EscapedPath()).Observe(latency.Seconds()) } @@ -169,7 +169,7 @@ func (f *clientGoWorkqueueMetricsProvider) NewLongestRunningProcessorSecondsMetr return clientGoWorkqueueLongestRunningProcessorMetricVec.WithLabelValues(name) } -func (clientGoWorkqueueMetricsProvider) NewRetriesMetric(name string) workqueue.CounterMetric { +func (clientGoWorkqueueMetricsProvider) NewRetriesMetric(string) workqueue.CounterMetric { // Retries are not used so the metric is omitted. return noopMetric{} } diff --git a/discovery/kubernetes/endpoints.go b/discovery/kubernetes/endpoints.go index 039daf4faf..27742ab464 100644 --- a/discovery/kubernetes/endpoints.go +++ b/discovery/kubernetes/endpoints.go @@ -11,6 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +// nolint:revive // Many legitimately empty blocks in this file. 
package kubernetes import ( diff --git a/discovery/kubernetes/endpointslice.go b/discovery/kubernetes/endpointslice.go index 135735154c..29dc0be2f6 100644 --- a/discovery/kubernetes/endpointslice.go +++ b/discovery/kubernetes/endpointslice.go @@ -190,7 +190,7 @@ func (e *EndpointSlice) Run(ctx context.Context, ch chan<- []*targetgroup.Group) } go func() { - for e.process(ctx, ch) { + for e.process(ctx, ch) { // nolint:revive } }() diff --git a/discovery/kubernetes/ingress.go b/discovery/kubernetes/ingress.go index 8c9249f545..ad47c341a5 100644 --- a/discovery/kubernetes/ingress.go +++ b/discovery/kubernetes/ingress.go @@ -89,7 +89,7 @@ func (i *Ingress) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { } go func() { - for i.process(ctx, ch) { + for i.process(ctx, ch) { // nolint:revive } }() diff --git a/discovery/kubernetes/node.go b/discovery/kubernetes/node.go index 93adf78252..16a06e7a0c 100644 --- a/discovery/kubernetes/node.go +++ b/discovery/kubernetes/node.go @@ -96,7 +96,7 @@ func (n *Node) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { } go func() { - for n.process(ctx, ch) { + for n.process(ctx, ch) { // nolint:revive } }() diff --git a/discovery/kubernetes/pod.go b/discovery/kubernetes/pod.go index 396720c223..732cf52ad9 100644 --- a/discovery/kubernetes/pod.go +++ b/discovery/kubernetes/pod.go @@ -132,7 +132,7 @@ func (p *Pod) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { } go func() { - for p.process(ctx, ch) { + for p.process(ctx, ch) { // nolint:revive } }() diff --git a/discovery/kubernetes/service.go b/discovery/kubernetes/service.go index a19f06e7d1..40e17679ee 100644 --- a/discovery/kubernetes/service.go +++ b/discovery/kubernetes/service.go @@ -92,7 +92,7 @@ func (s *Service) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { } go func() { - for s.process(ctx, ch) { + for s.process(ctx, ch) { // nolint:revive } }() diff --git a/discovery/legacymanager/manager_test.go 
b/discovery/legacymanager/manager_test.go index 57c82b72a8..13b84e6e36 100644 --- a/discovery/legacymanager/manager_test.go +++ b/discovery/legacymanager/manager_test.go @@ -686,12 +686,7 @@ func TestTargetUpdatesOrder(t *testing.T) { case tgs := <-provUpdates: discoveryManager.updateGroup(poolKey{setName: strconv.Itoa(i), provider: tc.title}, tgs) for _, got := range discoveryManager.allGroups() { - assertEqualGroups(t, got, tc.expectedTargets[x], func(got, expected string) string { - return fmt.Sprintf("%d: \ntargets mismatch \ngot: %v \nexpected: %v", - x, - got, - expected) - }) + assertEqualGroups(t, got, tc.expectedTargets[x]) } } } @@ -699,7 +694,7 @@ func TestTargetUpdatesOrder(t *testing.T) { } } -func assertEqualGroups(t *testing.T, got, expected []*targetgroup.Group, msg func(got, expected string) string) { +func assertEqualGroups(t *testing.T, got, expected []*targetgroup.Group) { t.Helper() // Need to sort by the groups's source as the received order is not guaranteed. @@ -1079,9 +1074,7 @@ func TestCoordinationWithReceiver(t *testing.T) { if _, ok := tgs[k]; !ok { t.Fatalf("step %d: target group not found: %s\ngot: %#v", i, k, tgs) } - assertEqualGroups(t, tgs[k], expected.tgs[k], func(got, expected string) string { - return fmt.Sprintf("step %d: targets mismatch \ngot: %q \nexpected: %q", i, got, expected) - }) + assertEqualGroups(t, tgs[k], expected.tgs[k]) } } } diff --git a/discovery/manager_test.go b/discovery/manager_test.go index 970168b0f5..5371608112 100644 --- a/discovery/manager_test.go +++ b/discovery/manager_test.go @@ -686,12 +686,7 @@ func TestTargetUpdatesOrder(t *testing.T) { case tgs := <-provUpdates: discoveryManager.updateGroup(poolKey{setName: strconv.Itoa(i), provider: tc.title}, tgs) for _, got := range discoveryManager.allGroups() { - assertEqualGroups(t, got, tc.expectedTargets[x], func(got, expected string) string { - return fmt.Sprintf("%d: \ntargets mismatch \ngot: %v \nexpected: %v", - x, - got, - expected) - }) + 
assertEqualGroups(t, got, tc.expectedTargets[x]) } } } @@ -699,7 +694,7 @@ func TestTargetUpdatesOrder(t *testing.T) { } } -func assertEqualGroups(t *testing.T, got, expected []*targetgroup.Group, msg func(got, expected string) string) { +func assertEqualGroups(t *testing.T, got, expected []*targetgroup.Group) { t.Helper() // Need to sort by the groups's source as the received order is not guaranteed. @@ -1129,7 +1124,7 @@ type lockStaticConfig struct { } func (s lockStaticConfig) Name() string { return "lockstatic" } -func (s lockStaticConfig) NewDiscoverer(options DiscovererOptions) (Discoverer, error) { +func (s lockStaticConfig) NewDiscoverer(DiscovererOptions) (Discoverer, error) { return (lockStaticDiscoverer)(s), nil } @@ -1330,9 +1325,7 @@ func TestCoordinationWithReceiver(t *testing.T) { if _, ok := tgs[k]; !ok { t.Fatalf("step %d: target group not found: %s\ngot: %#v", i, k, tgs) } - assertEqualGroups(t, tgs[k], expected.tgs[k], func(got, expected string) string { - return fmt.Sprintf("step %d: targets mismatch \ngot: %q \nexpected: %q", i, got, expected) - }) + assertEqualGroups(t, tgs[k], expected.tgs[k]) } } } @@ -1399,7 +1392,7 @@ func (o onceProvider) Run(_ context.Context, ch chan<- []*targetgroup.Group) { // TestTargetSetTargetGroupsUpdateDuringApplyConfig is used to detect races when // ApplyConfig happens at the same time as targets update. 
-func TestTargetSetTargetGroupsUpdateDuringApplyConfig(t *testing.T) { +func TestTargetSetTargetGroupsUpdateDuringApplyConfig(*testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() discoveryManager := NewManager(ctx, log.NewNopLogger()) diff --git a/discovery/nomad/nomad.go b/discovery/nomad/nomad.go index c8d5130396..7013f0737c 100644 --- a/discovery/nomad/nomad.go +++ b/discovery/nomad/nomad.go @@ -161,7 +161,7 @@ func NewDiscovery(conf *SDConfig, logger log.Logger) (*Discovery, error) { return d, nil } -func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) { +func (d *Discovery) refresh(context.Context) ([]*targetgroup.Group, error) { opts := &nomad.QueryOptions{ AllowStale: d.allowStale, } diff --git a/discovery/ovhcloud/dedicated_server.go b/discovery/ovhcloud/dedicated_server.go index aeb4eccbbf..bb5dadcd7b 100644 --- a/discovery/ovhcloud/dedicated_server.go +++ b/discovery/ovhcloud/dedicated_server.go @@ -102,7 +102,7 @@ func (d *dedicatedServerDiscovery) getSource() string { return fmt.Sprintf("%s_%s", d.config.Name(), d.getService()) } -func (d *dedicatedServerDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) { +func (d *dedicatedServerDiscovery) refresh(context.Context) ([]*targetgroup.Group, error) { client, err := createClient(d.config) if err != nil { return nil, err diff --git a/discovery/ovhcloud/vps.go b/discovery/ovhcloud/vps.go index 705b42b655..e2d1dee364 100644 --- a/discovery/ovhcloud/vps.go +++ b/discovery/ovhcloud/vps.go @@ -117,7 +117,7 @@ func (d *vpsDiscovery) getSource() string { return fmt.Sprintf("%s_%s", d.config.Name(), d.getService()) } -func (d *vpsDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) { +func (d *vpsDiscovery) refresh(context.Context) ([]*targetgroup.Group, error) { client, err := createClient(d.config) if err != nil { return nil, err diff --git a/discovery/vultr/vultr.go b/discovery/vultr/vultr.go index 2f489e7d45..42881d3c19 
100644 --- a/discovery/vultr/vultr.go +++ b/discovery/vultr/vultr.go @@ -202,10 +202,8 @@ func (d *Discovery) listInstances(ctx context.Context) ([]govultr.Instance, erro if meta.Links.Next == "" { break - } else { - listOptions.Cursor = meta.Links.Next - continue } + listOptions.Cursor = meta.Links.Next } return instances, nil diff --git a/discovery/zookeeper/zookeeper.go b/discovery/zookeeper/zookeeper.go index 308d63a5fc..cadff5fd2e 100644 --- a/discovery/zookeeper/zookeeper.go +++ b/discovery/zookeeper/zookeeper.go @@ -193,7 +193,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { } for _, pathUpdate := range d.pathUpdates { // Drain event channel in case the treecache leaks goroutines otherwise. - for range pathUpdate { + for range pathUpdate { // nolint:revive } } d.conn.Close() diff --git a/model/textparse/promparse.go b/model/textparse/promparse.go index 2c981f050e..94338a6660 100644 --- a/model/textparse/promparse.go +++ b/model/textparse/promparse.go @@ -238,9 +238,10 @@ func (p *PromParser) Metric(l *labels.Labels) string { return s } -// Exemplar writes the exemplar of the current sample into the passed -// exemplar. It returns if an exemplar exists. -func (p *PromParser) Exemplar(e *exemplar.Exemplar) bool { +// Exemplar implements the Parser interface. However, since the classic +// Prometheus text format does not support exemplars, this implementation simply +// returns false and does nothing else. 
+func (p *PromParser) Exemplar(*exemplar.Exemplar) bool { return false } diff --git a/promql/bench_test.go b/promql/bench_test.go index 3d35884477..4ece16bfe4 100644 --- a/promql/bench_test.go +++ b/promql/bench_test.go @@ -27,7 +27,7 @@ import ( "github.com/prometheus/prometheus/util/teststorage" ) -func setupRangeQueryTestData(stor *teststorage.TestStorage, engine *Engine, interval, numIntervals int) error { +func setupRangeQueryTestData(stor *teststorage.TestStorage, _ *Engine, interval, numIntervals int) error { metrics := []labels.Labels{} metrics = append(metrics, labels.FromStrings("__name__", "a_one")) metrics = append(metrics, labels.FromStrings("__name__", "b_one")) diff --git a/promql/engine.go b/promql/engine.go index fae4f09320..21b894b936 100644 --- a/promql/engine.go +++ b/promql/engine.go @@ -1957,7 +1957,7 @@ func (ev *evaluator) matrixIterSlice( // (b) the number of samples is relatively small. // so a linear search will be as fast as a binary search. var drop int - for drop = 0; floats[drop].T < mint; drop++ { + for drop = 0; floats[drop].T < mint; drop++ { // nolint:revive } ev.currentSamples -= drop copy(floats, floats[drop:]) diff --git a/promql/functions.go b/promql/functions.go index fd99703df2..0c22cb44c7 100644 --- a/promql/functions.go +++ b/promql/functions.go @@ -11,6 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +// nolint:revive // Many unused function arguments in this file by design. package promql import ( diff --git a/promql/parser/lex.go b/promql/parser/lex.go index 7b6a6b027b..657dc28095 100644 --- a/promql/parser/lex.go +++ b/promql/parser/lex.go @@ -11,6 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +// nolint:revive // Many legitimately empty blocks in this file.
package parser import ( @@ -293,7 +294,7 @@ func (l *Lexer) accept(valid string) bool { // acceptRun consumes a run of runes from the valid set. func (l *Lexer) acceptRun(valid string) { for strings.ContainsRune(valid, l.next()) { - // consume + // Consume. } l.backup() } diff --git a/promql/parser/parse.go b/promql/parser/parse.go index 6c37ce6fc6..3c32f3c05b 100644 --- a/promql/parser/parse.go +++ b/promql/parser/parse.go @@ -332,7 +332,7 @@ func (p *parser) Lex(lval *yySymType) int { // It is a no-op since the parsers error routines are triggered // by mechanisms that allow more fine-grained control // For more information, see https://pkg.go.dev/golang.org/x/tools/cmd/goyacc. -func (p *parser) Error(e string) { +func (p *parser) Error(string) { } // InjectItem allows injecting a single Item at the beginning of the token stream @@ -481,9 +481,9 @@ func (p *parser) checkAST(node Node) (typ ValueType) { // This is made a function instead of a variable, so it is lazily evaluated on demand. opRange := func() (r PositionRange) { // Remove whitespace at the beginning and end of the range. - for r.Start = n.LHS.PositionRange().End; isSpace(rune(p.lex.input[r.Start])); r.Start++ { + for r.Start = n.LHS.PositionRange().End; isSpace(rune(p.lex.input[r.Start])); r.Start++ { // nolint:revive } - for r.End = n.RHS.PositionRange().Start - 1; isSpace(rune(p.lex.input[r.End])); r.End-- { + for r.End = n.RHS.PositionRange().Start - 1; isSpace(rune(p.lex.input[r.End])); r.End-- { // nolint:revive } return } diff --git a/rules/manager.go b/rules/manager.go index b6513e82d4..31c90e9e9d 100644 --- a/rules/manager.go +++ b/rules/manager.go @@ -866,12 +866,13 @@ func (g *Group) RestoreForState(ts time.Time) { timeSpentPending := downAt.Sub(restoredActiveAt) timeRemainingPending := alertHoldDuration - timeSpentPending - if timeRemainingPending <= 0 { + switch { + case timeRemainingPending <= 0: // It means that alert was firing when prometheus went down. 
// In the next Eval, the state of this alert will be set back to // firing again if it's still firing in that Eval. // Nothing to be done in this case. - } else if timeRemainingPending < g.opts.ForGracePeriod { + case timeRemainingPending < g.opts.ForGracePeriod: // (new) restoredActiveAt = (ts + m.opts.ForGracePeriod) - alertHoldDuration // /* new firing time */ /* moving back by hold duration */ // @@ -884,7 +885,7 @@ func (g *Group) RestoreForState(ts time.Time) { // = (ts + m.opts.ForGracePeriod) - ts // = m.opts.ForGracePeriod restoredActiveAt = ts.Add(g.opts.ForGracePeriod).Add(-alertHoldDuration) - } else { + default: // By shifting ActiveAt to the future (ActiveAt + some_duration), // the total pending time from the original ActiveAt // would be `alertHoldDuration + some_duration`. diff --git a/rules/manager_test.go b/rules/manager_test.go index 440e06c9af..16bb080f55 100644 --- a/rules/manager_test.go +++ b/rules/manager_test.go @@ -779,13 +779,13 @@ func TestUpdate(t *testing.T) { rgs.Groups[i].Interval = model.Duration(10) } } - reloadAndValidate(rgs, t, tmpFile, ruleManager, expected, ogs) + reloadAndValidate(rgs, t, tmpFile, ruleManager, ogs) // Update limit and reload. for i := range rgs.Groups { rgs.Groups[i].Limit = 1 } - reloadAndValidate(rgs, t, tmpFile, ruleManager, expected, ogs) + reloadAndValidate(rgs, t, tmpFile, ruleManager, ogs) // Change group rules and reload. for i, g := range rgs.Groups { @@ -793,7 +793,7 @@ func TestUpdate(t *testing.T) { rgs.Groups[i].Rules[j].Expr.SetString(fmt.Sprintf("%s * 0", r.Expr.Value)) } } - reloadAndValidate(rgs, t, tmpFile, ruleManager, expected, ogs) + reloadAndValidate(rgs, t, tmpFile, ruleManager, ogs) } // ruleGroupsTest for running tests over rules. 
@@ -836,7 +836,7 @@ func formatRules(r *rulefmt.RuleGroups) ruleGroupsTest { } } -func reloadAndValidate(rgs *rulefmt.RuleGroups, t *testing.T, tmpFile *os.File, ruleManager *Manager, expected map[string]labels.Labels, ogs map[string]*Group) { +func reloadAndValidate(rgs *rulefmt.RuleGroups, t *testing.T, tmpFile *os.File, ruleManager *Manager, ogs map[string]*Group) { bs, err := yaml.Marshal(formatRules(rgs)) require.NoError(t, err) tmpFile.Seek(0, 0) diff --git a/rules/origin_test.go b/rules/origin_test.go index dd8e47f74c..ea4f4f905d 100644 --- a/rules/origin_test.go +++ b/rules/origin_test.go @@ -30,19 +30,19 @@ type unknownRule struct{} func (u unknownRule) Name() string { return "" } func (u unknownRule) Labels() labels.Labels { return labels.EmptyLabels() } -func (u unknownRule) Eval(ctx context.Context, time time.Time, queryFunc QueryFunc, url *url.URL, i int) (promql.Vector, error) { +func (u unknownRule) Eval(context.Context, time.Time, QueryFunc, *url.URL, int) (promql.Vector, error) { return nil, nil } -func (u unknownRule) String() string { return "" } -func (u unknownRule) Query() parser.Expr { return nil } -func (u unknownRule) SetLastError(err error) {} -func (u unknownRule) LastError() error { return nil } -func (u unknownRule) SetHealth(health RuleHealth) {} -func (u unknownRule) Health() RuleHealth { return "" } -func (u unknownRule) SetEvaluationDuration(duration time.Duration) {} -func (u unknownRule) GetEvaluationDuration() time.Duration { return 0 } -func (u unknownRule) SetEvaluationTimestamp(time time.Time) {} -func (u unknownRule) GetEvaluationTimestamp() time.Time { return time.Time{} } +func (u unknownRule) String() string { return "" } +func (u unknownRule) Query() parser.Expr { return nil } +func (u unknownRule) SetLastError(error) {} +func (u unknownRule) LastError() error { return nil } +func (u unknownRule) SetHealth(RuleHealth) {} +func (u unknownRule) Health() RuleHealth { return "" } +func (u unknownRule) 
SetEvaluationDuration(time.Duration) {} +func (u unknownRule) GetEvaluationDuration() time.Duration { return 0 } +func (u unknownRule) SetEvaluationTimestamp(time.Time) {} +func (u unknownRule) GetEvaluationTimestamp() time.Time { return time.Time{} } func TestNewRuleDetailPanics(t *testing.T) { require.PanicsWithValue(t, `unknown rule type "rules.unknownRule"`, func() { diff --git a/scrape/scrape_test.go b/scrape/scrape_test.go index dcb3b48c10..a8028b652a 100644 --- a/scrape/scrape_test.go +++ b/scrape/scrape_test.go @@ -2405,7 +2405,7 @@ type testScraper struct { scrapeFunc func(context.Context, io.Writer) error } -func (ts *testScraper) offset(interval time.Duration, jitterSeed uint64) time.Duration { +func (ts *testScraper) offset(time.Duration, uint64) time.Duration { return ts.offsetDur } @@ -2867,7 +2867,7 @@ func TestScrapeAddFast(t *testing.T) { require.NoError(t, slApp.Commit()) } -func TestReuseCacheRace(t *testing.T) { +func TestReuseCacheRace(*testing.T) { var ( app = &nopAppendable{} cfg = &config.ScrapeConfig{ diff --git a/scrape/target_test.go b/scrape/target_test.go index 2bc3f000c5..9d25df4149 100644 --- a/scrape/target_test.go +++ b/scrape/target_test.go @@ -134,7 +134,7 @@ func TestTargetURL(t *testing.T) { require.Equal(t, expectedURL, target.URL()) } -func newTestTarget(targetURL string, deadline time.Duration, lbls labels.Labels) *Target { +func newTestTarget(targetURL string, _ time.Duration, lbls labels.Labels) *Target { lb := labels.NewBuilder(lbls) lb.Set(model.SchemeLabel, "http") lb.Set(model.AddressLabel, strings.TrimPrefix(targetURL, "http://")) diff --git a/storage/buffer_test.go b/storage/buffer_test.go index ebe24d8df3..fcb43c2735 100644 --- a/storage/buffer_test.go +++ b/storage/buffer_test.go @@ -188,8 +188,8 @@ func BenchmarkBufferedSeriesIterator(b *testing.B) { b.ReportAllocs() b.ResetTimer() - for it.Next() != chunkenc.ValNone { - // scan everything + for it.Next() != chunkenc.ValNone { // nolint:revive + // Scan everything. 
} require.NoError(b, it.Err()) } diff --git a/storage/fanout_test.go b/storage/fanout_test.go index 4996e8f64a..b4490636df 100644 --- a/storage/fanout_test.go +++ b/storage/fanout_test.go @@ -233,7 +233,7 @@ func (errQuerier) Select(bool, *storage.SelectHints, ...*labels.Matcher) storage return storage.ErrSeriesSet(errSelect) } -func (errQuerier) LabelValues(name string, matchers ...*labels.Matcher) ([]string, storage.Warnings, error) { +func (errQuerier) LabelValues(string, ...*labels.Matcher) ([]string, storage.Warnings, error) { return nil, nil, errors.New("label values error") } diff --git a/storage/interface.go b/storage/interface.go index 5cf70a351b..b282f1fc62 100644 --- a/storage/interface.go +++ b/storage/interface.go @@ -99,7 +99,7 @@ type MockQueryable struct { MockQuerier Querier } -func (q *MockQueryable) Querier(ctx context.Context, mint, maxt int64) (Querier, error) { +func (q *MockQueryable) Querier(context.Context, int64, int64) (Querier, error) { return q.MockQuerier, nil } @@ -118,11 +118,11 @@ type MockQuerier struct { SelectMockFunction func(sortSeries bool, hints *SelectHints, matchers ...*labels.Matcher) SeriesSet } -func (q *MockQuerier) LabelValues(name string, matchers ...*labels.Matcher) ([]string, Warnings, error) { +func (q *MockQuerier) LabelValues(string, ...*labels.Matcher) ([]string, Warnings, error) { return nil, nil, nil } -func (q *MockQuerier) LabelNames(matchers ...*labels.Matcher) ([]string, Warnings, error) { +func (q *MockQuerier) LabelNames(...*labels.Matcher) ([]string, Warnings, error) { return nil, nil, nil } diff --git a/storage/memoized_iterator_test.go b/storage/memoized_iterator_test.go index 382c84e63b..d996436e00 100644 --- a/storage/memoized_iterator_test.go +++ b/storage/memoized_iterator_test.go @@ -82,8 +82,8 @@ func BenchmarkMemoizedSeriesIterator(b *testing.B) { b.ReportAllocs() b.ResetTimer() - for it.Next() != chunkenc.ValNone { - // scan everything + for it.Next() != chunkenc.ValNone { // nolint:revive + 
// Scan everything. } require.NoError(b, it.Err()) } diff --git a/storage/merge.go b/storage/merge.go index 8db1f7ae83..193a025227 100644 --- a/storage/merge.go +++ b/storage/merge.go @@ -722,12 +722,11 @@ func (c *compactChunkIterator) Next() bool { break } - if next.MinTime == prev.MinTime && - next.MaxTime == prev.MaxTime && - bytes.Equal(next.Chunk.Bytes(), prev.Chunk.Bytes()) { - // 1:1 duplicates, skip it. - } else { - // We operate on same series, so labels does not matter here. + // Only do something if it is not a perfect duplicate. + if next.MinTime != prev.MinTime || + next.MaxTime != prev.MaxTime || + !bytes.Equal(next.Chunk.Bytes(), prev.Chunk.Bytes()) { + // We operate on same series, so labels do not matter here. overlapping = append(overlapping, newChunkToSeriesDecoder(labels.EmptyLabels(), next)) if next.MaxTime > oMaxTime { oMaxTime = next.MaxTime diff --git a/storage/remote/codec_test.go b/storage/remote/codec_test.go index 36d0d7c31d..27e2cc704d 100644 --- a/storage/remote/codec_test.go +++ b/storage/remote/codec_test.go @@ -524,7 +524,7 @@ func TestDecodeWriteRequest(t *testing.T) { require.Equal(t, writeRequestFixture, actual) } -func TestNilHistogramProto(t *testing.T) { +func TestNilHistogramProto(*testing.T) { // This function will panic if it impromperly handles nil // values, causing the test to fail. HistogramProtoToHistogram(prompb.Histogram{}) diff --git a/storage/remote/queue_manager_test.go b/storage/remote/queue_manager_test.go index 5ec52de6b2..a57c3bf7b1 100644 --- a/storage/remote/queue_manager_test.go +++ b/storage/remote/queue_manager_test.go @@ -362,7 +362,7 @@ func TestReshard(t *testing.T) { c.waitForExpectedData(t) } -func TestReshardRaceWithStop(t *testing.T) { +func TestReshardRaceWithStop(*testing.T) { c := NewTestWriteClient() var m *QueueManager h := sync.Mutex{} @@ -864,10 +864,10 @@ func (c *TestBlockingWriteClient) Endpoint() string { // For benchmarking the send and not the receive side. 
type NopWriteClient struct{} -func NewNopWriteClient() *NopWriteClient { return &NopWriteClient{} } -func (c *NopWriteClient) Store(_ context.Context, req []byte) error { return nil } -func (c *NopWriteClient) Name() string { return "nopwriteclient" } -func (c *NopWriteClient) Endpoint() string { return "http://test-remote.com/1234" } +func NewNopWriteClient() *NopWriteClient { return &NopWriteClient{} } +func (c *NopWriteClient) Store(context.Context, []byte) error { return nil } +func (c *NopWriteClient) Name() string { return "nopwriteclient" } +func (c *NopWriteClient) Endpoint() string { return "http://test-remote.com/1234" } func BenchmarkSampleSend(b *testing.B) { // Send one sample per series, which is the typical remote_write case diff --git a/storage/remote/write_handler_test.go b/storage/remote/write_handler_test.go index 9c787f17e5..e7a88ddc23 100644 --- a/storage/remote/write_handler_test.go +++ b/storage/remote/write_handler_test.go @@ -294,7 +294,7 @@ func (m *mockAppendable) AppendExemplar(_ storage.SeriesRef, l labels.Labels, e return 0, nil } -func (m *mockAppendable) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) { +func (m *mockAppendable) AppendHistogram(_ storage.SeriesRef, l labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) { if t < m.latestHistogram { return 0, storage.ErrOutOfOrderSample } diff --git a/tsdb/agent/db.go b/tsdb/agent/db.go index a17e0d1b98..cb075f3060 100644 --- a/tsdb/agent/db.go +++ b/tsdb/agent/db.go @@ -732,22 +732,22 @@ func (db *DB) StartTime() (int64, error) { } // Querier implements the Storage interface. -func (db *DB) Querier(ctx context.Context, mint, maxt int64) (storage.Querier, error) { +func (db *DB) Querier(context.Context, int64, int64) (storage.Querier, error) { return nil, ErrUnsupported } // ChunkQuerier implements the Storage interface. 
-func (db *DB) ChunkQuerier(ctx context.Context, mint, maxt int64) (storage.ChunkQuerier, error) { +func (db *DB) ChunkQuerier(context.Context, int64, int64) (storage.ChunkQuerier, error) { return nil, ErrUnsupported } // ExemplarQuerier implements the Storage interface. -func (db *DB) ExemplarQuerier(ctx context.Context) (storage.ExemplarQuerier, error) { +func (db *DB) ExemplarQuerier(context.Context) (storage.ExemplarQuerier, error) { return nil, ErrUnsupported } // Appender implements storage.Storage. -func (db *DB) Appender(_ context.Context) storage.Appender { +func (db *DB) Appender(context.Context) storage.Appender { return db.appenderPool.Get().(storage.Appender) } @@ -823,7 +823,7 @@ func (a *appender) Append(ref storage.SeriesRef, l labels.Labels, t int64, v flo return 0, storage.ErrOutOfOrderSample } - // NOTE: always modify pendingSamples and sampleSeries together + // NOTE: always modify pendingSamples and sampleSeries together. a.pendingSamples = append(a.pendingSamples, record.RefSample{ Ref: series.ref, T: t, @@ -849,8 +849,8 @@ func (a *appender) getOrCreate(l labels.Labels) (series *memSeries, created bool return series, true } -func (a *appender) AppendExemplar(ref storage.SeriesRef, l labels.Labels, e exemplar.Exemplar) (storage.SeriesRef, error) { - // series references and chunk references are identical for agent mode. +func (a *appender) AppendExemplar(ref storage.SeriesRef, _ labels.Labels, e exemplar.Exemplar) (storage.SeriesRef, error) { + // Series references and chunk references are identical for agent mode. 
headRef := chunks.HeadSeriesRef(ref) s := a.series.GetByID(headRef) @@ -973,7 +973,7 @@ func (a *appender) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t int return storage.SeriesRef(series.ref), nil } -func (a *appender) UpdateMetadata(ref storage.SeriesRef, l labels.Labels, m metadata.Metadata) (storage.SeriesRef, error) { +func (a *appender) UpdateMetadata(storage.SeriesRef, labels.Labels, metadata.Metadata) (storage.SeriesRef, error) { // TODO: Wire metadata in the Agent's appender. return 0, nil } diff --git a/tsdb/chunkenc/float_histogram.go b/tsdb/chunkenc/float_histogram.go index b462c6d9fd..67b706b2eb 100644 --- a/tsdb/chunkenc/float_histogram.go +++ b/tsdb/chunkenc/float_histogram.go @@ -107,7 +107,7 @@ func (c *FloatHistogramChunk) Appender() (Appender, error) { // To get an appender, we must know the state it would have if we had // appended all existing data from scratch. We iterate through the end // and populate via the iterator's state. - for it.Next() == ValFloatHistogram { + for it.Next() == ValFloatHistogram { // nolint:revive } if err := it.Err(); err != nil { return nil, err diff --git a/tsdb/chunkenc/float_histogram_test.go b/tsdb/chunkenc/float_histogram_test.go index 31d96ee7a9..90c16d1ea9 100644 --- a/tsdb/chunkenc/float_histogram_test.go +++ b/tsdb/chunkenc/float_histogram_test.go @@ -111,7 +111,7 @@ func TestFloatHistogramChunkSameBuckets(t *testing.T) { // 3. Now recycle an iterator that was never used to access anything. itX := c.Iterator(nil) - for itX.Next() == ValFloatHistogram { + for itX.Next() == ValFloatHistogram { // nolint:revive // Just iterate through without accessing anything. 
} it3 := c.iterator(itX) diff --git a/tsdb/chunkenc/histogram.go b/tsdb/chunkenc/histogram.go index 7b6a9cacb3..0d53f1f496 100644 --- a/tsdb/chunkenc/histogram.go +++ b/tsdb/chunkenc/histogram.go @@ -126,7 +126,7 @@ func (c *HistogramChunk) Appender() (Appender, error) { // To get an appender, we must know the state it would have if we had // appended all existing data from scratch. We iterate through the end // and populate via the iterator's state. - for it.Next() == ValHistogram { + for it.Next() == ValHistogram { // nolint:revive } if err := it.Err(); err != nil { return nil, err diff --git a/tsdb/chunkenc/histogram_test.go b/tsdb/chunkenc/histogram_test.go index 4bb146ccdb..45f31a3b4d 100644 --- a/tsdb/chunkenc/histogram_test.go +++ b/tsdb/chunkenc/histogram_test.go @@ -116,7 +116,7 @@ func TestHistogramChunkSameBuckets(t *testing.T) { // 3. Now recycle an iterator that was never used to access anything. itX := c.Iterator(nil) - for itX.Next() == ValHistogram { + for itX.Next() == ValHistogram { // nolint:revive // Just iterate through without accessing anything. } it3 := c.iterator(itX) diff --git a/tsdb/chunkenc/xor.go b/tsdb/chunkenc/xor.go index 2fa2f613cb..ba2d96d36b 100644 --- a/tsdb/chunkenc/xor.go +++ b/tsdb/chunkenc/xor.go @@ -99,7 +99,7 @@ func (c *XORChunk) Appender() (Appender, error) { // To get an appender we must know the state it would have if we had // appended all existing data from scratch. // We iterate through the end and populate via the iterator's state. 
- for it.Next() != ValNone { + for it.Next() != ValNone { // nolint:revive } if err := it.Err(); err != nil { return nil, err @@ -152,11 +152,11 @@ type xorAppender struct { trailing uint8 } -func (a *xorAppender) AppendHistogram(t int64, h *histogram.Histogram) { +func (a *xorAppender) AppendHistogram(int64, *histogram.Histogram) { panic("appended a histogram to an xor chunk") } -func (a *xorAppender) AppendFloatHistogram(t int64, h *histogram.FloatHistogram) { +func (a *xorAppender) AppendFloatHistogram(int64, *histogram.FloatHistogram) { panic("appended a float histogram to an xor chunk") } diff --git a/tsdb/chunks/head_chunks_test.go b/tsdb/chunks/head_chunks_test.go index ac89ae3e59..20a4c2064b 100644 --- a/tsdb/chunks/head_chunks_test.go +++ b/tsdb/chunks/head_chunks_test.go @@ -503,10 +503,10 @@ func createChunkDiskMapper(t *testing.T, dir string) *ChunkDiskMapper { func randomChunk(t *testing.T) chunkenc.Chunk { chunk := chunkenc.NewXORChunk() - len := rand.Int() % 120 + length := rand.Int() % 120 app, err := chunk.Appender() require.NoError(t, err) - for i := 0; i < len; i++ { + for i := 0; i < length; i++ { app.Append(rand.Int63(), rand.Float64()) } return chunk diff --git a/tsdb/compact_test.go b/tsdb/compact_test.go index d9a105f225..93bca179e0 100644 --- a/tsdb/compact_test.go +++ b/tsdb/compact_test.go @@ -467,8 +467,8 @@ func (erringBReader) Size() int64 { return 0 } type nopChunkWriter struct{} -func (nopChunkWriter) WriteChunks(chunks ...chunks.Meta) error { return nil } -func (nopChunkWriter) Close() error { return nil } +func (nopChunkWriter) WriteChunks(...chunks.Meta) error { return nil } +func (nopChunkWriter) Close() error { return nil } func samplesForRange(minTime, maxTime int64, maxSamplesPerChunk int) (ret [][]sample) { var curr []sample diff --git a/tsdb/db_test.go b/tsdb/db_test.go index c54fccf6f1..3e4d35f878 100644 --- a/tsdb/db_test.go +++ b/tsdb/db_test.go @@ -1426,11 +1426,11 @@ type mockCompactorFailing struct { max int } -func 
(*mockCompactorFailing) Plan(dir string) ([]string, error) { +func (*mockCompactorFailing) Plan(string) ([]string, error) { return nil, nil } -func (c *mockCompactorFailing) Write(dest string, b BlockReader, mint, maxt int64, parent *BlockMeta) (ulid.ULID, error) { +func (c *mockCompactorFailing) Write(dest string, _ BlockReader, _, _ int64, _ *BlockMeta) (ulid.ULID, error) { if len(c.blocks) >= c.max { return ulid.ULID{}, fmt.Errorf("the compactor already did the maximum allowed blocks so it is time to fail") } @@ -1458,7 +1458,7 @@ func (*mockCompactorFailing) Compact(string, []string, []*Block) (ulid.ULID, err return ulid.ULID{}, nil } -func (*mockCompactorFailing) CompactOOO(dest string, oooHead *OOOCompactionHead) (result []ulid.ULID, err error) { +func (*mockCompactorFailing) CompactOOO(string, *OOOCompactionHead) (result []ulid.ULID, err error) { return nil, fmt.Errorf("mock compaction failing CompactOOO") } diff --git a/tsdb/exemplar.go b/tsdb/exemplar.go index 5ba3567e41..ca108ae91c 100644 --- a/tsdb/exemplar.go +++ b/tsdb/exemplar.go @@ -115,17 +115,17 @@ func NewExemplarMetrics(reg prometheus.Registerer) *ExemplarMetrics { // 1GB of extra memory, accounting for the fact that this is heap allocated space. // If len <= 0, then the exemplar storage is essentially a noop storage but can later be // resized to store exemplars. 
-func NewCircularExemplarStorage(len int64, m *ExemplarMetrics) (ExemplarStorage, error) { - if len < 0 { - len = 0 +func NewCircularExemplarStorage(length int64, m *ExemplarMetrics) (ExemplarStorage, error) { + if length < 0 { + length = 0 } c := &CircularExemplarStorage{ - exemplars: make([]*circularBufferEntry, len), - index: make(map[string]*indexEntry, len/estimatedExemplarsPerSeries), + exemplars: make([]*circularBufferEntry, length), + index: make(map[string]*indexEntry, length/estimatedExemplarsPerSeries), metrics: m, } - c.metrics.maxExemplars.Set(float64(len)) + c.metrics.maxExemplars.Set(float64(length)) return c, nil } diff --git a/tsdb/goversion/goversion_test.go b/tsdb/goversion/goversion_test.go index 9a7486d66f..853844fb93 100644 --- a/tsdb/goversion/goversion_test.go +++ b/tsdb/goversion/goversion_test.go @@ -24,4 +24,4 @@ import ( // // The blank import above is actually what invokes the test of this package. If // the import succeeds (the code compiles), the test passed. -func Test(t *testing.T) {} +func Test(*testing.T) {} diff --git a/tsdb/head_test.go b/tsdb/head_test.go index e80c197b23..39bcf4c78b 100644 --- a/tsdb/head_test.go +++ b/tsdb/head_test.go @@ -11,6 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +// nolint:revive // Many legitimately empty blocks in this file. package tsdb import ( @@ -103,7 +104,7 @@ func BenchmarkHeadAppender_Append_Commit_ExistingSeries(b *testing.B) { b.Cleanup(func() { require.NoError(b, h.Close()) }) ts := int64(1000) - append := func() error { + appendSamples := func() error { var err error app := h.Appender(context.Background()) for _, s := range series[:seriesCount] { @@ -120,13 +121,13 @@ func BenchmarkHeadAppender_Append_Commit_ExistingSeries(b *testing.B) { } // Init series, that's not what we're benchmarking here. 
- require.NoError(b, append()) + require.NoError(b, appendSamples()) b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { - require.NoError(b, append()) + require.NoError(b, appendSamples()) } }) } diff --git a/tsdb/head_wal.go b/tsdb/head_wal.go index 6a8a30d5ad..4da500a712 100644 --- a/tsdb/head_wal.go +++ b/tsdb/head_wal.go @@ -11,6 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +// nolint:revive // Many legitimately empty blocks in this file. package tsdb import ( diff --git a/tsdb/ooo_head_read.go b/tsdb/ooo_head_read.go index 86d0e3b7b3..fd16c6ca8f 100644 --- a/tsdb/ooo_head_read.go +++ b/tsdb/ooo_head_read.go @@ -11,6 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +// nolint:revive // Many unused function arguments in this file by design. package tsdb import ( diff --git a/tsdb/querier.go b/tsdb/querier.go index 4b3144c71d..f3852d517b 100644 --- a/tsdb/querier.go +++ b/tsdb/querier.go @@ -1085,7 +1085,7 @@ func newNopChunkReader() ChunkReader { } } -func (cr nopChunkReader) Chunk(meta chunks.Meta) (chunkenc.Chunk, error) { +func (cr nopChunkReader) Chunk(chunks.Meta) (chunkenc.Chunk, error) { return cr.emptyChunk, nil } diff --git a/tsdb/querier_bench_test.go b/tsdb/querier_bench_test.go index 78993d105f..89758a1d31 100644 --- a/tsdb/querier_bench_test.go +++ b/tsdb/querier_bench_test.go @@ -250,7 +250,7 @@ func BenchmarkQuerierSelect(b *testing.B) { b.ResetTimer() for i := 0; i < b.N; i++ { ss := q.Select(sorted, nil, matcher) - for ss.Next() { + for ss.Next() { // nolint:revive } require.NoError(b, ss.Err()) } diff --git a/tsdb/querier_test.go b/tsdb/querier_test.go index fa3dd2418f..3a92e49516 100644 --- a/tsdb/querier_test.go +++ b/tsdb/querier_test.go @@ -11,6 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License.
+// nolint:revive // Many unused function arguments in this file by design. package tsdb import ( diff --git a/tsdb/wal.go b/tsdb/wal.go index e0bc1ec699..93d445a126 100644 --- a/tsdb/wal.go +++ b/tsdb/wal.go @@ -11,6 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +// nolint:revive // Many unused function arguments in this file by design. package tsdb import ( diff --git a/tsdb/wlog/reader_test.go b/tsdb/wlog/reader_test.go index 97d251b3ae..737520e76a 100644 --- a/tsdb/wlog/reader_test.go +++ b/tsdb/wlog/reader_test.go @@ -533,7 +533,7 @@ func TestReaderData(t *testing.T) { require.NoError(t, err) reader := fn(sr) - for reader.Next() { + for reader.Next() { // nolint:revive } require.NoError(t, reader.Err()) diff --git a/tsdb/wlog/wlog_test.go b/tsdb/wlog/wlog_test.go index ed8a9df2e3..d36934bbd3 100644 --- a/tsdb/wlog/wlog_test.go +++ b/tsdb/wlog/wlog_test.go @@ -164,7 +164,7 @@ func TestWALRepair_ReadingError(t *testing.T) { sr := NewSegmentBufReader(s) require.NoError(t, err) r := NewReader(sr) - for r.Next() { + for r.Next() { // nolint:revive } // Close the segment so we don't break things on Windows.
diff --git a/util/logging/dedupe_test.go b/util/logging/dedupe_test.go index ad234445b2..e05d6454c5 100644 --- a/util/logging/dedupe_test.go +++ b/util/logging/dedupe_test.go @@ -22,7 +22,7 @@ import ( type counter int -func (c *counter) Log(keyvals ...interface{}) error { +func (c *counter) Log(...interface{}) error { (*c)++ return nil } diff --git a/util/testutil/context.go b/util/testutil/context.go index cf730421b4..c1f4a831ce 100644 --- a/util/testutil/context.go +++ b/util/testutil/context.go @@ -37,6 +37,6 @@ func (c *MockContext) Err() error { } // Value ignores the Value and always returns nil -func (c *MockContext) Value(key interface{}) interface{} { +func (c *MockContext) Value(interface{}) interface{} { return nil } diff --git a/util/testutil/roundtrip.go b/util/testutil/roundtrip.go index a93991a13e..364e0c2642 100644 --- a/util/testutil/roundtrip.go +++ b/util/testutil/roundtrip.go @@ -22,7 +22,7 @@ type roundTrip struct { theError error } -func (rt *roundTrip) RoundTrip(r *http.Request) (*http.Response, error) { +func (rt *roundTrip) RoundTrip(*http.Request) (*http.Response, error) { return rt.theResponse, rt.theError } diff --git a/util/treecache/treecache.go b/util/treecache/treecache.go index 7dd41dcedd..acdd6f7bea 100644 --- a/util/treecache/treecache.go +++ b/util/treecache/treecache.go @@ -116,7 +116,7 @@ func (tc *ZookeeperTreeCache) Stop() { tc.stop <- struct{}{} go func() { // Drain tc.head.events so that go routines can make progress and exit. 
- for range tc.head.events { + for range tc.head.events { // nolint:revive } }() go func() { diff --git a/web/api/v1/api.go b/web/api/v1/api.go index dde814eb0f..0624cf2d8b 100644 --- a/web/api/v1/api.go +++ b/web/api/v1/api.go @@ -117,7 +117,7 @@ type RulesRetriever interface { type StatsRenderer func(context.Context, *stats.Statistics, string) stats.QueryStats -func defaultStatsRenderer(ctx context.Context, s *stats.Statistics, param string) stats.QueryStats { +func defaultStatsRenderer(_ context.Context, s *stats.Statistics, param string) stats.QueryStats { if param != "" { return stats.NewQueryStats(s) } @@ -392,7 +392,7 @@ func invalidParamError(err error, parameter string) apiFuncResult { }, nil, nil} } -func (api *API) options(r *http.Request) apiFuncResult { +func (api *API) options(*http.Request) apiFuncResult { return apiFuncResult{nil, nil, nil, nil} } @@ -1565,7 +1565,7 @@ func (api *API) snapshot(r *http.Request) apiFuncResult { }{name}, nil, nil, nil} } -func (api *API) cleanTombstones(r *http.Request) apiFuncResult { +func (api *API) cleanTombstones(*http.Request) apiFuncResult { if !api.enableAdmin { return apiFuncResult{nil, &apiError{errorUnavailable, errors.New("admin APIs disabled")}, nil, nil} } @@ -1764,7 +1764,7 @@ func marshalSeriesJSON(ptr unsafe.Pointer, stream *jsoniter.Stream) { stream.WriteObjectEnd() } -func marshalSeriesJSONIsEmpty(ptr unsafe.Pointer) bool { +func marshalSeriesJSONIsEmpty(unsafe.Pointer) bool { return false } @@ -1817,7 +1817,7 @@ func marshalSampleJSON(ptr unsafe.Pointer, stream *jsoniter.Stream) { stream.WriteObjectEnd() } -func marshalSampleJSONIsEmpty(ptr unsafe.Pointer) bool { +func marshalSampleJSONIsEmpty(unsafe.Pointer) bool { return false } @@ -1841,7 +1841,7 @@ func marshalHPointJSON(ptr unsafe.Pointer, stream *jsoniter.Stream) { stream.WriteArrayEnd() } -func marshalPointJSONIsEmpty(ptr unsafe.Pointer) bool { +func marshalPointJSONIsEmpty(unsafe.Pointer) bool { return false } @@ -1878,6 +1878,6 @@ func 
marshalExemplarJSON(ptr unsafe.Pointer, stream *jsoniter.Stream) { stream.WriteObjectEnd() } -func marshalExemplarJSONEmpty(ptr unsafe.Pointer) bool { +func marshalExemplarJSONEmpty(unsafe.Pointer) bool { return false } diff --git a/web/api/v1/api_test.go b/web/api/v1/api_test.go index efce042214..04aab31af4 100644 --- a/web/api/v1/api_test.go +++ b/web/api/v1/api_test.go @@ -2560,9 +2560,9 @@ type fakeDB struct { err error } -func (f *fakeDB) CleanTombstones() error                               { return f.err } -func (f *fakeDB) Delete(mint, maxt int64, ms ...*labels.Matcher) error { return f.err } -func (f *fakeDB) Snapshot(dir string, withHead bool) error             { return f.err } +func (f *fakeDB) CleanTombstones() error                        { return f.err } +func (f *fakeDB) Delete(int64, int64, ...*labels.Matcher) error { return f.err } +func (f *fakeDB) Snapshot(string, bool) error                   { return f.err } func (f *fakeDB) Stats(statsByLabelName string) (_ *tsdb.Stats, retErr error) { dbDir, err := os.MkdirTemp("", "tsdb-api-ready") if err != nil { return nil, err } diff --git a/web/api/v1/errors_test.go b/web/api/v1/errors_test.go index da0d4b3f2e..4947afd813 100644 --- a/web/api/v1/errors_test.go +++ b/web/api/v1/errors_test.go @@ -11,6 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +// nolint:revive // Many unused function arguments in this file by design.
package v1 import ( diff --git a/web/web.go b/web/web.go index 9d63094f69..cac41502d2 100644 --- a/web/web.go +++ b/web/web.go @@ -755,14 +755,14 @@ func toFloat64(f *io_prometheus_client.MetricFamily) float64 { return math.NaN() } -func (h *Handler) version(w http.ResponseWriter, r *http.Request) { +func (h *Handler) version(w http.ResponseWriter, _ *http.Request) { dec := json.NewEncoder(w) if err := dec.Encode(h.versionInfo); err != nil { http.Error(w, fmt.Sprintf("error encoding JSON: %s", err), http.StatusInternalServerError) } } -func (h *Handler) quit(w http.ResponseWriter, r *http.Request) { +func (h *Handler) quit(w http.ResponseWriter, _ *http.Request) { var closed bool h.quitOnce.Do(func() { closed = true @@ -774,7 +774,7 @@ func (h *Handler) quit(w http.ResponseWriter, r *http.Request) { } } -func (h *Handler) reload(w http.ResponseWriter, r *http.Request) { +func (h *Handler) reload(w http.ResponseWriter, _ *http.Request) { rc := make(chan error) h.reloadCh <- rc if err := <-rc; err != nil { From 5b53aa11083233963bd477d19134492355d9ed0f Mon Sep 17 00:00:00 2001 From: beorn7 Date: Wed, 12 Apr 2023 16:14:31 +0200 Subject: [PATCH 11/26] style: Replace `else if` cascades with `switch` Wiser coders than myself have come to the conclusion that a `switch` statement is almost always superior to a statement that includes any `else if`. The exceptions that I have found in our codebase are just these two: * The `if else` is followed by an additional statement before the next condition (separated by a `;`). * The whole thing is within a `for` loop and `break` statements are used. In this case, using `switch` would require tagging the `for` loop, which probably tips the balance. Why are `switch` statements more readable? For one, fewer curly braces. But more importantly, the conditions all have the same alignment, so the whole thing follows the natural flow of going down a list of conditions. 
With `else if`, in contrast, all conditions but the first are "hidden" behind `} else if `, harder to spot and (for no good reason) presented differently from the first condition. I'm sure the aforemention wise coders can list even more reasons. In any case, I like it so much that I have found myself recommending it in code reviews. I would like to make it a habit in our code base, without making it a hard requirement that we would test on the CI. But for that, there has to be a role model, so this commit eliminates all `if else` occurrences, unless it is autogenerated code or fits one of the exceptions above. Signed-off-by: beorn7 --- cmd/prometheus/main_unix_test.go | 6 +- cmd/promtool/tsdb.go | 5 +- discovery/dns/dns.go | 7 ++- discovery/kubernetes/kubernetes.go | 7 ++- discovery/linode/linode.go | 7 ++- discovery/marathon/marathon.go | 12 ++-- .../remote_storage_adapter/influxdb/client.go | 7 ++- model/histogram/float_histogram.go | 5 +- model/labels/labels.go | 14 +++-- promql/engine.go | 56 +++++++++++-------- promql/functions.go | 8 ++- promql/parser/ast.go | 9 +-- promql/parser/lex.go | 19 ++++--- promql/parser/parse.go | 28 +++++----- promql/parser/printer.go | 36 +++++++----- promql/quantile.go | 10 ++-- rules/alerting_test.go | 6 +- rules/manager_test.go | 7 ++- rules/recording_test.go | 6 +- scrape/manager.go | 5 +- scrape/scrape.go | 10 ++-- scrape/scrape_test.go | 36 +++++++----- storage/fanout.go | 5 +- storage/merge.go | 7 ++- storage/remote/codec.go | 23 ++++---- storage/remote/ewma.go | 5 +- storage/remote/queue_manager.go | 10 ++-- tsdb/agent/db.go | 5 +- tsdb/chunkenc/xor.go | 7 ++- tsdb/chunks/head_chunks.go | 5 +- tsdb/db.go | 10 ++-- tsdb/head_append.go | 36 +++++++----- tsdb/head_read.go | 13 +++-- tsdb/head_test.go | 29 +++++----- tsdb/index/postings.go | 22 ++++---- tsdb/ooo_head_read.go | 11 ++-- tsdb/querier.go | 25 +++++---- tsdb/tombstones/tombstones.go | 5 +- tsdb/wal.go | 20 ++++--- tsdb/wlog/live_reader.go | 5 +- 
tsdb/wlog/watcher.go | 10 ++-- util/treecache/treecache.go | 16 +++--- web/api/v1/api.go | 8 ++- web/federate_test.go | 10 ++-- 44 files changed, 340 insertions(+), 253 deletions(-) diff --git a/cmd/prometheus/main_unix_test.go b/cmd/prometheus/main_unix_test.go index b49110ea91..7224e25d70 100644 --- a/cmd/prometheus/main_unix_test.go +++ b/cmd/prometheus/main_unix_test.go @@ -72,9 +72,11 @@ Loop: if !startedOk { t.Fatal("prometheus didn't start in the specified timeout") } - if err := prom.Process.Kill(); err == nil { + switch err := prom.Process.Kill(); { + case err == nil: t.Errorf("prometheus didn't shutdown gracefully after sending the Interrupt signal") - } else if stoppedErr != nil && stoppedErr.Error() != "signal: interrupt" { // TODO - find a better way to detect when the process didn't exit as expected! + case stoppedErr != nil && stoppedErr.Error() != "signal: interrupt": + // TODO: find a better way to detect when the process didn't exit as expected! t.Errorf("prometheus exited with an unexpected error: %v", stoppedErr) } } diff --git a/cmd/promtool/tsdb.go b/cmd/promtool/tsdb.go index 0e0cdb863a..84aa43a9c4 100644 --- a/cmd/promtool/tsdb.go +++ b/cmd/promtool/tsdb.go @@ -403,14 +403,15 @@ func openBlock(path, blockID string) (*tsdb.DBReadOnly, tsdb.BlockReader, error) return nil, nil, err } var block tsdb.BlockReader - if blockID != "" { + switch { + case blockID != "": for _, b := range blocks { if b.Meta().ULID.String() == blockID { block = b break } } - } else if len(blocks) > 0 { + case len(blocks) > 0: block = blocks[len(blocks)-1] } if block == nil { diff --git a/discovery/dns/dns.go b/discovery/dns/dns.go index 2b11c242ab..96e07254f0 100644 --- a/discovery/dns/dns.go +++ b/discovery/dns/dns.go @@ -285,21 +285,22 @@ func lookupWithSearchPath(name string, qtype uint16, logger log.Logger) (*dns.Ms for _, lname := range conf.NameList(name) { response, err := lookupFromAnyServer(lname, qtype, conf, logger) - if err != nil { + switch { + case err != 
nil: // We can't go home yet, because a later name // may give us a valid, successful answer. However // we can no longer say "this name definitely doesn't // exist", because we did not get that answer for // at least one name. allResponsesValid = false - } else if response.Rcode == dns.RcodeSuccess { + case response.Rcode == dns.RcodeSuccess: // Outcome 1: GOLD! return response, nil } } if allResponsesValid { - // Outcome 2: everyone says NXDOMAIN, that's good enough for me + // Outcome 2: everyone says NXDOMAIN, that's good enough for me. return &dns.Msg{}, nil } // Outcome 3: boned. diff --git a/discovery/kubernetes/kubernetes.go b/discovery/kubernetes/kubernetes.go index 0f03e2cdb7..a44bd513ce 100644 --- a/discovery/kubernetes/kubernetes.go +++ b/discovery/kubernetes/kubernetes.go @@ -299,12 +299,13 @@ func New(l log.Logger, conf *SDConfig) (*Discovery, error) { err error ownNamespace string ) - if conf.KubeConfig != "" { + switch { + case conf.KubeConfig != "": kcfg, err = clientcmd.BuildConfigFromFlags("", conf.KubeConfig) if err != nil { return nil, err } - } else if conf.APIServer.URL == nil { + case conf.APIServer.URL == nil: // Use the Kubernetes provided pod service account // as described in https://kubernetes.io/docs/admin/service-accounts-admin/ kcfg, err = rest.InClusterConfig() @@ -324,7 +325,7 @@ func New(l log.Logger, conf *SDConfig) (*Discovery, error) { } level.Info(l).Log("msg", "Using pod service account via in-cluster config") - } else { + default: rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "kubernetes_sd") if err != nil { return nil, err diff --git a/discovery/linode/linode.go b/discovery/linode/linode.go index 0fd0a2c370..449e13cd89 100644 --- a/discovery/linode/linode.go +++ b/discovery/linode/linode.go @@ -250,19 +250,20 @@ func (d *Discovery) refreshData(ctx context.Context) ([]*targetgroup.Group, erro continue } - if detailedIP.Public && publicIPv4 == "" { + switch { + case detailedIP.Public && publicIPv4 == "": 
publicIPv4 = detailedIP.Address if detailedIP.RDNS != "" && detailedIP.RDNS != "null" { publicIPv4RDNS = detailedIP.RDNS } - } else if !detailedIP.Public && privateIPv4 == "" { + case !detailedIP.Public && privateIPv4 == "": privateIPv4 = detailedIP.Address if detailedIP.RDNS != "" && detailedIP.RDNS != "null" { privateIPv4RDNS = detailedIP.RDNS } - } else { + default: extraIPs = append(extraIPs, detailedIP.Address) } } diff --git a/discovery/marathon/marathon.go b/discovery/marathon/marathon.go index 079f93ad0b..cfd3e2c083 100644 --- a/discovery/marathon/marathon.go +++ b/discovery/marathon/marathon.go @@ -136,9 +136,10 @@ func NewDiscovery(conf SDConfig, logger log.Logger) (*Discovery, error) { return nil, err } - if len(conf.AuthToken) > 0 { + switch { + case len(conf.AuthToken) > 0: rt, err = newAuthTokenRoundTripper(conf.AuthToken, rt) - } else if len(conf.AuthTokenFile) > 0 { + case len(conf.AuthTokenFile) > 0: rt, err = newAuthTokenFileRoundTripper(conf.AuthTokenFile, rt) } if err != nil { @@ -400,19 +401,20 @@ func targetsForApp(app *app) []model.LabelSet { var labels []map[string]string var prefix string - if len(app.Container.PortMappings) != 0 { + switch { + case len(app.Container.PortMappings) != 0: // In Marathon 1.5.x the "container.docker.portMappings" object was moved // to "container.portMappings". ports, labels = extractPortMapping(app.Container.PortMappings, app.isContainerNet()) prefix = portMappingLabelPrefix - } else if len(app.Container.Docker.PortMappings) != 0 { + case len(app.Container.Docker.PortMappings) != 0: // Prior to Marathon 1.5 the port mappings could be found at the path // "container.docker.portMappings". 
ports, labels = extractPortMapping(app.Container.Docker.PortMappings, app.isContainerNet()) prefix = portMappingLabelPrefix - } else if len(app.PortDefinitions) != 0 { + case len(app.PortDefinitions) != 0: // PortDefinitions deprecates the "ports" array and can be used to specify // a list of ports with metadata in case a mapping is not required. ports = make([]uint32, len(app.PortDefinitions)) diff --git a/documentation/examples/remote_storage/remote_storage_adapter/influxdb/client.go b/documentation/examples/remote_storage/remote_storage_adapter/influxdb/client.go index fffbc9c2ae..959656aa8f 100644 --- a/documentation/examples/remote_storage/remote_storage_adapter/influxdb/client.go +++ b/documentation/examples/remote_storage/remote_storage_adapter/influxdb/client.go @@ -290,13 +290,14 @@ func mergeSamples(a, b []prompb.Sample) []prompb.Sample { result := make([]prompb.Sample, 0, len(a)+len(b)) i, j := 0, 0 for i < len(a) && j < len(b) { - if a[i].Timestamp < b[j].Timestamp { + switch { + case a[i].Timestamp < b[j].Timestamp: result = append(result, a[i]) i++ - } else if a[i].Timestamp > b[j].Timestamp { + case a[i].Timestamp > b[j].Timestamp: result = append(result, b[j]) j++ - } else { + default: result = append(result, a[i]) i++ j++ diff --git a/model/histogram/float_histogram.go b/model/histogram/float_histogram.go index cd73083bbd..f95f0051c9 100644 --- a/model/histogram/float_histogram.go +++ b/model/histogram/float_histogram.go @@ -824,10 +824,11 @@ mergeLoop: // Merge together all buckets from the original schema that fall into origIdx += span.Offset } currIdx := i.targetIdx(origIdx) - if firstPass { + switch { + case firstPass: i.currIdx = currIdx firstPass = false - } else if currIdx != i.currIdx { + case currIdx != i.currIdx: // Reached next bucket in targetSchema. // Do not actually forward to the next bucket, but break out. 
break mergeLoop diff --git a/model/labels/labels.go b/model/labels/labels.go index b7398d17f9..93524ddcfc 100644 --- a/model/labels/labels.go +++ b/model/labels/labels.go @@ -169,11 +169,12 @@ func (ls Labels) HashForLabels(b []byte, names ...string) (uint64, []byte) { b = b[:0] i, j := 0, 0 for i < len(ls) && j < len(names) { - if names[j] < ls[i].Name { + switch { + case names[j] < ls[i].Name: j++ - } else if ls[i].Name < names[j] { + case ls[i].Name < names[j]: i++ - } else { + default: b = append(b, ls[i].Name...) b = append(b, seps[0]) b = append(b, ls[i].Value...) @@ -213,11 +214,12 @@ func (ls Labels) BytesWithLabels(buf []byte, names ...string) []byte { b.WriteByte(labelSep) i, j := 0, 0 for i < len(ls) && j < len(names) { - if names[j] < ls[i].Name { + switch { + case names[j] < ls[i].Name: j++ - } else if ls[i].Name < names[j] { + case ls[i].Name < names[j]: i++ - } else { + default: if b.Len() > 1 { b.WriteByte(seps[0]) } diff --git a/promql/engine.go b/promql/engine.go index 21b894b936..1f493f129c 100644 --- a/promql/engine.go +++ b/promql/engine.go @@ -400,7 +400,7 @@ func (ng *Engine) SetQueryLogger(l QueryLogger) { } // NewInstantQuery returns an evaluation query for the given expression at the given time. -func (ng *Engine) NewInstantQuery(ctx context.Context, q storage.Queryable, opts *QueryOpts, qs string, ts time.Time) (Query, error) { +func (ng *Engine) NewInstantQuery(_ context.Context, q storage.Queryable, opts *QueryOpts, qs string, ts time.Time) (Query, error) { expr, err := parser.ParseExpr(qs) if err != nil { return nil, err @@ -416,7 +416,7 @@ func (ng *Engine) NewInstantQuery(ctx context.Context, q storage.Queryable, opts // NewRangeQuery returns an evaluation query for the given time range and with // the resolution set by the interval. 
-func (ng *Engine) NewRangeQuery(ctx context.Context, q storage.Queryable, opts *QueryOpts, qs string, start, end time.Time, interval time.Duration) (Query, error) { +func (ng *Engine) NewRangeQuery(_ context.Context, q storage.Queryable, opts *QueryOpts, qs string, start, end time.Time, interval time.Duration) (Query, error) { expr, err := parser.ParseExpr(qs) if err != nil { return nil, err @@ -1979,7 +1979,7 @@ func (ev *evaluator) matrixIterSlice( // (b) the number of samples is relatively small. // so a linear search will be as fast as a binary search. var drop int - for drop = 0; histograms[drop].T < mint; drop++ { + for drop = 0; histograms[drop].T < mint; drop++ { // nolint:revive } ev.currentSamples -= drop copy(histograms, histograms[drop:]) @@ -2096,13 +2096,13 @@ func (ev *evaluator) VectorAnd(lhs, rhs Vector, matching *parser.VectorMatching, } func (ev *evaluator) VectorOr(lhs, rhs Vector, matching *parser.VectorMatching, lhsh, rhsh []EvalSeriesHelper, enh *EvalNodeHelper) Vector { - if matching.Card != parser.CardManyToMany { + switch { + case matching.Card != parser.CardManyToMany: panic("set operations must only use many-to-many matching") - } - if len(lhs) == 0 { // Short-circuit. + case len(lhs) == 0: // Short-circuit. enh.Out = append(enh.Out, rhs...) return enh.Out - } else if len(rhs) == 0 { + case len(rhs) == 0: enh.Out = append(enh.Out, lhs...) 
return enh.Out } @@ -2221,13 +2221,14 @@ func (ev *evaluator) VectorBinop(op parser.ItemType, lhs, rhs Vector, matching * hl, hr = hr, hl } floatValue, histogramValue, keep := vectorElemBinop(op, fl, fr, hl, hr) - if returnBool { + switch { + case returnBool: if keep { floatValue = 1.0 } else { floatValue = 0.0 } - } else if !keep { + case !keep: continue } metric := resultMetric(ls.Metric, rs.Metric, op, matching, enh) @@ -2514,14 +2515,15 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without if !ok { var m labels.Labels enh.resetBuilder(metric) - if without { + switch { + case without: enh.lb.Del(grouping...) enh.lb.Del(labels.MetricName) m = enh.lb.Labels() - } else if len(grouping) > 0 { + case len(grouping) > 0: enh.lb.Keep(grouping...) m = enh.lb.Labels() - } else { + default: m = labels.EmptyLabels() } newAgg := &groupedAggregation{ @@ -2530,9 +2532,10 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without mean: s.F, groupCount: 1, } - if s.H == nil { + switch { + case s.H == nil: newAgg.hasFloat = true - } else if op == parser.SUM { + case op == parser.SUM: newAgg.histogramValue = s.H.Copy() newAgg.hasHistogram = true } @@ -2542,9 +2545,10 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without inputVecLen := int64(len(vec)) resultSize := k - if k > inputVecLen { + switch { + case k > inputVecLen: resultSize = inputVecLen - } else if k == 0 { + case k == 0: resultSize = 1 } switch op { @@ -2637,12 +2641,13 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without case parser.TOPK: // We build a heap of up to k elements, with the smallest element at heap[0]. 
- if int64(len(group.heap)) < k { + switch { + case int64(len(group.heap)) < k: heap.Push(&group.heap, &Sample{ F: s.F, Metric: s.Metric, }) - } else if group.heap[0].F < s.F || (math.IsNaN(group.heap[0].F) && !math.IsNaN(s.F)) { + case group.heap[0].F < s.F || (math.IsNaN(group.heap[0].F) && !math.IsNaN(s.F)): // This new element is bigger than the previous smallest element - overwrite that. group.heap[0] = Sample{ F: s.F, @@ -2655,12 +2660,13 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without case parser.BOTTOMK: // We build a heap of up to k elements, with the biggest element at heap[0]. - if int64(len(group.reverseHeap)) < k { + switch { + case int64(len(group.reverseHeap)) < k: heap.Push(&group.reverseHeap, &Sample{ F: s.F, Metric: s.Metric, }) - } else if group.reverseHeap[0].F > s.F || (math.IsNaN(group.reverseHeap[0].F) && !math.IsNaN(s.F)) { + case group.reverseHeap[0].F > s.F || (math.IsNaN(group.reverseHeap[0].F) && !math.IsNaN(s.F)): // This new element is smaller than the previous biggest element - overwrite that. 
group.reverseHeap[0] = Sample{ F: s.F, @@ -2819,9 +2825,10 @@ func PreprocessExpr(expr parser.Expr, start, end time.Time) parser.Expr { func preprocessExprHelper(expr parser.Expr, start, end time.Time) bool { switch n := expr.(type) { case *parser.VectorSelector: - if n.StartOrEnd == parser.START { + switch n.StartOrEnd { + case parser.START: n.Timestamp = makeInt64Pointer(timestamp.FromTime(start)) - } else if n.StartOrEnd == parser.END { + case parser.END: n.Timestamp = makeInt64Pointer(timestamp.FromTime(end)) } return n.Timestamp != nil @@ -2878,9 +2885,10 @@ func preprocessExprHelper(expr parser.Expr, start, end time.Time) bool { if isInvariant { n.Expr = newStepInvariantExpr(n.Expr) } - if n.StartOrEnd == parser.START { + switch n.StartOrEnd { + case parser.START: n.Timestamp = makeInt64Pointer(timestamp.FromTime(start)) - } else if n.StartOrEnd == parser.END { + case parser.END: n.Timestamp = makeInt64Pointer(timestamp.FromTime(end)) } return n.Timestamp != nil diff --git a/promql/functions.go b/promql/functions.go index 0c22cb44c7..2983de4fde 100644 --- a/promql/functions.go +++ b/promql/functions.go @@ -804,12 +804,14 @@ func funcPi(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) V // === sgn(Vector parser.ValueTypeVector) Vector === func funcSgn(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { return simpleFunc(vals, enh, func(v float64) float64 { - if v < 0 { + switch { + case v < 0: return -1 - } else if v > 0 { + case v > 0: return 1 + default: + return v } - return v }) } diff --git a/promql/parser/ast.go b/promql/parser/ast.go index 190af2d590..f156fc6024 100644 --- a/promql/parser/ast.go +++ b/promql/parser/ast.go @@ -368,13 +368,14 @@ func Children(node Node) []Node { case *AggregateExpr: // While this does not look nice, it should avoid unnecessary allocations // caused by slice resizing - if n.Expr == nil && n.Param == nil { + switch { + case n.Expr == nil && n.Param == nil: return nil - } else if 
n.Expr == nil { + case n.Expr == nil: return []Node{n.Param} - } else if n.Param == nil { + case n.Param == nil: return []Node{n.Expr} - } else { + default: return []Node{n.Expr, n.Param} } case *BinaryExpr: diff --git a/promql/parser/lex.go b/promql/parser/lex.go index 657dc28095..fe5a8abfeb 100644 --- a/promql/parser/lex.go +++ b/promql/parser/lex.go @@ -347,9 +347,10 @@ func lexStatements(l *Lexer) stateFn { switch r := l.next(); { case r == eof: - if l.parenDepth != 0 { + switch { + case l.parenDepth != 0: return l.errorf("unclosed left parenthesis") - } else if l.bracketOpen { + case l.bracketOpen: return l.errorf("unclosed left bracket") } l.emit(EOF) @@ -371,12 +372,13 @@ func lexStatements(l *Lexer) stateFn { case r == '^': l.emit(POW) case r == '=': - if t := l.peek(); t == '=' { + switch t := l.peek(); t { + case '=': l.next() l.emit(EQLC) - } else if t == '~' { + case '~': return l.errorf("unexpected character after '=': %q", t) - } else { + default: l.emit(EQL) } case r == '!': @@ -791,11 +793,12 @@ Loop: default: l.backup() word := l.input[l.start:l.pos] - if kw, ok := key[strings.ToLower(word)]; ok { + switch kw, ok := key[strings.ToLower(word)]; { + case ok: l.emit(kw) - } else if !strings.Contains(word, ":") { + case !strings.Contains(word, ":"): l.emit(IDENTIFIER) - } else { + default: l.emit(METRIC_IDENTIFIER) } break Loop diff --git a/promql/parser/parse.go b/promql/parser/parse.go index 3c32f3c05b..e69ed4595c 100644 --- a/promql/parser/parse.go +++ b/promql/parser/parse.go @@ -270,14 +270,15 @@ var errUnexpected = errors.New("unexpected error") // recover is the handler that turns panics into returns from the top level of Parse. func (p *parser) recover(errp *error) { e := recover() - if _, ok := e.(runtime.Error); ok { + switch _, ok := e.(runtime.Error); { + case ok: // Print the stack trace but do not inhibit the running application. 
buf := make([]byte, 64<<10) buf = buf[:runtime.Stack(buf, false)] fmt.Fprintf(os.Stderr, "parser panic: %v\n%s", e, buf) *errp = errUnexpected - } else if e != nil { + case e != nil: *errp = e.(error) } } @@ -518,20 +519,18 @@ func (p *parser) checkAST(node Node) (typ ValueType) { p.addParseErrf(n.RHS.PositionRange(), "binary expression must contain only scalar and instant vector types") } - if (lt != ValueTypeVector || rt != ValueTypeVector) && n.VectorMatching != nil { + switch { + case (lt != ValueTypeVector || rt != ValueTypeVector) && n.VectorMatching != nil: if len(n.VectorMatching.MatchingLabels) > 0 { p.addParseErrf(n.PositionRange(), "vector matching only allowed between instant vectors") } n.VectorMatching = nil - } else { - // Both operands are Vectors. - if n.Op.IsSetOperator() { - if n.VectorMatching.Card == CardOneToMany || n.VectorMatching.Card == CardManyToOne { - p.addParseErrf(n.PositionRange(), "no grouping allowed for %q operation", n.Op) - } - if n.VectorMatching.Card != CardManyToMany { - p.addParseErrf(n.PositionRange(), "set operations must always be many-to-many") - } + case n.Op.IsSetOperator(): // Both operands are Vectors. 
+ if n.VectorMatching.Card == CardOneToMany || n.VectorMatching.Card == CardManyToOne { + p.addParseErrf(n.PositionRange(), "no grouping allowed for %q operation", n.Op) + } + if n.VectorMatching.Card != CardManyToMany { + p.addParseErrf(n.PositionRange(), "set operations must always be many-to-many") } } @@ -708,9 +707,10 @@ func (p *parser) addOffset(e Node, offset time.Duration) { } // it is already ensured by parseDuration func that there never will be a zero offset modifier - if *orgoffsetp != 0 { + switch { + case *orgoffsetp != 0: p.addParseErrf(e.PositionRange(), "offset may not be set multiple times") - } else if orgoffsetp != nil { + case orgoffsetp != nil: *orgoffsetp = offset } diff --git a/promql/parser/printer.go b/promql/parser/printer.go index 1f15eeef33..ff171f2152 100644 --- a/promql/parser/printer.go +++ b/promql/parser/printer.go @@ -124,17 +124,19 @@ func (node *MatrixSelector) String() string { // Copy the Vector selector before changing the offset vecSelector := *node.VectorSelector.(*VectorSelector) offset := "" - if vecSelector.OriginalOffset > time.Duration(0) { + switch { + case vecSelector.OriginalOffset > time.Duration(0): offset = fmt.Sprintf(" offset %s", model.Duration(vecSelector.OriginalOffset)) - } else if vecSelector.OriginalOffset < time.Duration(0) { + case vecSelector.OriginalOffset < time.Duration(0): offset = fmt.Sprintf(" offset -%s", model.Duration(-vecSelector.OriginalOffset)) } at := "" - if vecSelector.Timestamp != nil { + switch { + case vecSelector.Timestamp != nil: at = fmt.Sprintf(" @ %.3f", float64(*vecSelector.Timestamp)/1000.0) - } else if vecSelector.StartOrEnd == START { + case vecSelector.StartOrEnd == START: at = " @ start()" - } else if vecSelector.StartOrEnd == END { + case vecSelector.StartOrEnd == END: at = " @ end()" } @@ -162,17 +164,19 @@ func (node *SubqueryExpr) getSubqueryTimeSuffix() string { step = model.Duration(node.Step).String() } offset := "" - if node.OriginalOffset > time.Duration(0) { + 
switch { + case node.OriginalOffset > time.Duration(0): offset = fmt.Sprintf(" offset %s", model.Duration(node.OriginalOffset)) - } else if node.OriginalOffset < time.Duration(0) { + case node.OriginalOffset < time.Duration(0): offset = fmt.Sprintf(" offset -%s", model.Duration(-node.OriginalOffset)) } at := "" - if node.Timestamp != nil { + switch { + case node.Timestamp != nil: at = fmt.Sprintf(" @ %.3f", float64(*node.Timestamp)/1000.0) - } else if node.StartOrEnd == START { + case node.StartOrEnd == START: at = " @ start()" - } else if node.StartOrEnd == END { + case node.StartOrEnd == END: at = " @ end()" } return fmt.Sprintf("[%s:%s]%s%s", model.Duration(node.Range), step, at, offset) @@ -207,17 +211,19 @@ func (node *VectorSelector) String() string { labelStrings = append(labelStrings, matcher.String()) } offset := "" - if node.OriginalOffset > time.Duration(0) { + switch { + case node.OriginalOffset > time.Duration(0): offset = fmt.Sprintf(" offset %s", model.Duration(node.OriginalOffset)) - } else if node.OriginalOffset < time.Duration(0) { + case node.OriginalOffset < time.Duration(0): offset = fmt.Sprintf(" offset -%s", model.Duration(-node.OriginalOffset)) } at := "" - if node.Timestamp != nil { + switch { + case node.Timestamp != nil: at = fmt.Sprintf(" @ %.3f", float64(*node.Timestamp)/1000.0) - } else if node.StartOrEnd == START { + case node.StartOrEnd == START: at = " @ start()" - } else if node.StartOrEnd == END { + case node.StartOrEnd == END: at = " @ end()" } diff --git a/promql/quantile.go b/promql/quantile.go index aaead671c7..78d0bbaf0c 100644 --- a/promql/quantile.go +++ b/promql/quantile.go @@ -169,11 +169,12 @@ func histogramQuantile(q float64, h *histogram.FloatHistogram) float64 { } } if bucket.Lower < 0 && bucket.Upper > 0 { - if len(h.NegativeBuckets) == 0 && len(h.PositiveBuckets) > 0 { + switch { + case len(h.NegativeBuckets) == 0 && len(h.PositiveBuckets) > 0: // The result is in the zero bucket and the histogram has only // 
positive buckets. So we consider 0 to be the lower bound. bucket.Lower = 0 - } else if len(h.PositiveBuckets) == 0 && len(h.NegativeBuckets) > 0 { + case len(h.PositiveBuckets) == 0 && len(h.NegativeBuckets) > 0: // The result is in the zero bucket and the histogram has only // negative buckets. So we consider 0 to be the upper bound. bucket.Upper = 0 @@ -244,12 +245,13 @@ func histogramFraction(lower, upper float64, h *histogram.FloatHistogram) float6 for it.Next() { b := it.At() if b.Lower < 0 && b.Upper > 0 { - if len(h.NegativeBuckets) == 0 && len(h.PositiveBuckets) > 0 { + switch { + case len(h.NegativeBuckets) == 0 && len(h.PositiveBuckets) > 0: // This is the zero bucket and the histogram has only // positive buckets. So we consider 0 to be the lower // bound. b.Lower = 0 - } else if len(h.PositiveBuckets) == 0 && len(h.NegativeBuckets) > 0 { + case len(h.PositiveBuckets) == 0 && len(h.NegativeBuckets) > 0: // This is in the zero bucket and the histogram has only // negative buckets. So we consider 0 to be the upper // bound. 
diff --git a/rules/alerting_test.go b/rules/alerting_test.go index 13aab98048..8cd0da2815 100644 --- a/rules/alerting_test.go +++ b/rules/alerting_test.go @@ -587,10 +587,10 @@ func TestAlertingRuleLimit(t *testing.T) { evalTime := time.Unix(0, 0) for _, test := range tests { - _, err := rule.Eval(suite.Context(), evalTime, EngineQueryFunc(suite.QueryEngine(), suite.Storage()), nil, test.limit) - if err != nil { + switch _, err := rule.Eval(suite.Context(), evalTime, EngineQueryFunc(suite.QueryEngine(), suite.Storage()), nil, test.limit); { + case err != nil: require.EqualError(t, err, test.err) - } else if test.err != "" { + case test.err != "": t.Errorf("Expected errror %s, got none", test.err) } } diff --git a/rules/manager_test.go b/rules/manager_test.go index 16bb080f55..26a7909644 100644 --- a/rules/manager_test.go +++ b/rules/manager_test.go @@ -481,17 +481,18 @@ func TestForStateRestore(t *testing.T) { }) // Checking if we have restored it correctly. - if tst.noRestore { + switch { + case tst.noRestore: require.Equal(t, tst.num, len(got)) for _, e := range got { require.Equal(t, e.ActiveAt, restoreTime) } - } else if tst.gracePeriod { + case tst.gracePeriod: require.Equal(t, tst.num, len(got)) for _, e := range got { require.Equal(t, opts.ForGracePeriod, e.ActiveAt.Add(alertForDuration).Sub(restoreTime)) } - } else { + default: exp := tst.alerts require.Equal(t, len(exp), len(got)) sortAlerts(exp) diff --git a/rules/recording_test.go b/rules/recording_test.go index 61f47e0487..35a0b1a0bd 100644 --- a/rules/recording_test.go +++ b/rules/recording_test.go @@ -223,10 +223,10 @@ func TestRecordingRuleLimit(t *testing.T) { evalTime := time.Unix(0, 0) for _, test := range tests { - _, err := rule.Eval(suite.Context(), evalTime, EngineQueryFunc(suite.QueryEngine(), suite.Storage()), nil, test.limit) - if err != nil { + switch _, err := rule.Eval(suite.Context(), evalTime, EngineQueryFunc(suite.QueryEngine(), suite.Storage()), nil, test.limit); { + case err != nil: 
require.EqualError(t, err, test.err) - } else if test.err != "" { + case test.err != "": t.Errorf("Expected error %s, got none", test.err) } } diff --git a/scrape/manager.go b/scrape/manager.go index 69a0eaa1f7..d75fe30cf5 100644 --- a/scrape/manager.go +++ b/scrape/manager.go @@ -288,10 +288,11 @@ func (m *Manager) ApplyConfig(cfg *config.Config) error { // Cleanup and reload pool if the configuration has changed. var failed bool for name, sp := range m.scrapePools { - if cfg, ok := m.scrapeConfigs[name]; !ok { + switch cfg, ok := m.scrapeConfigs[name]; { + case !ok: sp.stop() delete(m.scrapePools, name) - } else if !reflect.DeepEqual(sp.config, cfg) { + case !reflect.DeepEqual(sp.config, cfg): err := sp.reload(cfg) if err != nil { level.Error(m.logger).Log("msg", "error reloading scrape pool", "err", err, "scrape_pool", name) diff --git a/scrape/scrape.go b/scrape/scrape.go index f38527ff30..680585af89 100644 --- a/scrape/scrape.go +++ b/scrape/scrape.go @@ -503,9 +503,10 @@ func (sp *scrapePool) Sync(tgs []*targetgroup.Group) { // Replicate .Labels().IsEmpty() with a loop here to avoid generating garbage. nonEmpty := false t.LabelsRange(func(l labels.Label) { nonEmpty = true }) - if nonEmpty { + switch { + case nonEmpty: all = append(all, t) - } else if !t.discoveredLabels.IsEmpty() { + case !t.discoveredLabels.IsEmpty(): sp.droppedTargets = append(sp.droppedTargets, t) } } @@ -946,9 +947,10 @@ func (c *scrapeCache) iterDone(flushCache bool) { count := len(c.series) + len(c.droppedSeries) + len(c.metadata) c.metaMtx.Unlock() - if flushCache { + switch { + case flushCache: c.successfulCount = count - } else if count > c.successfulCount*2+1000 { + case count > c.successfulCount*2+1000: // If a target had varying labels in scrapes that ultimately failed, // the caches would grow indefinitely. Force a flush when this happens. 
// We use the heuristic that this is a doubling of the cache size diff --git a/scrape/scrape_test.go b/scrape/scrape_test.go index a8028b652a..6b4b2d5f57 100644 --- a/scrape/scrape_test.go +++ b/scrape/scrape_test.go @@ -724,9 +724,10 @@ func TestScrapeLoopStop(t *testing.T) { // All samples in a scrape must have the same timestamp. var ts int64 for i, s := range appender.result { - if i%6 == 0 { + switch { + case i%6 == 0: ts = s.t - } else if s.t != ts { + case s.t != ts: t.Fatalf("Unexpected multiple timestamps within single scrape") } } @@ -1139,10 +1140,11 @@ func TestScrapeLoopRunCreatesStaleMarkersOnFailedScrape(t *testing.T) { scraper.scrapeFunc = func(ctx context.Context, w io.Writer) error { numScrapes++ - if numScrapes == 1 { + switch numScrapes { + case 1: w.Write([]byte("metric_a 42\n")) return nil - } else if numScrapes == 5 { + case 5: cancel() } return errors.New("scrape failed") @@ -1200,13 +1202,14 @@ func TestScrapeLoopRunCreatesStaleMarkersOnParseFailure(t *testing.T) { scraper.scrapeFunc = func(ctx context.Context, w io.Writer) error { numScrapes++ - if numScrapes == 1 { + switch numScrapes { + case 1: w.Write([]byte("metric_a 42\n")) return nil - } else if numScrapes == 2 { + case 2: w.Write([]byte("7&-\n")) return nil - } else if numScrapes == 3 { + case 3: cancel() } return errors.New("scrape failed") @@ -1265,14 +1268,15 @@ func TestScrapeLoopCache(t *testing.T) { numScrapes := 0 scraper.scrapeFunc = func(ctx context.Context, w io.Writer) error { - if numScrapes == 1 || numScrapes == 2 { + switch numScrapes { + case 1, 2: if _, ok := sl.cache.series["metric_a"]; !ok { t.Errorf("metric_a missing from cache after scrape %d", numScrapes) } if _, ok := sl.cache.series["metric_b"]; !ok { t.Errorf("metric_b missing from cache after scrape %d", numScrapes) } - } else if numScrapes == 3 { + case 3: if _, ok := sl.cache.series["metric_a"]; !ok { t.Errorf("metric_a missing from cache after scrape %d", numScrapes) } @@ -1283,13 +1287,14 @@ func 
TestScrapeLoopCache(t *testing.T) { numScrapes++ - if numScrapes == 1 { + switch numScrapes { + case 1: w.Write([]byte("metric_a 42\nmetric_b 43\n")) return nil - } else if numScrapes == 3 { + case 3: w.Write([]byte("metric_a 44\n")) return nil - } else if numScrapes == 4 { + case 4: cancel() } return fmt.Errorf("scrape failed") @@ -2280,11 +2285,12 @@ func TestTargetScrapeScrapeCancel(t *testing.T) { go func() { _, err := ts.scrape(ctx, io.Discard) - if err == nil { + switch { + case err == nil: errc <- errors.New("Expected error but got nil") - } else if ctx.Err() != context.Canceled { + case ctx.Err() != context.Canceled: errc <- errors.Errorf("Expected context cancellation error but got: %s", ctx.Err()) - } else { + default: close(errc) } }() diff --git a/storage/fanout.go b/storage/fanout.go index 4f995afbac..a9db4f6280 100644 --- a/storage/fanout.go +++ b/storage/fanout.go @@ -222,9 +222,10 @@ func (f *fanoutAppender) Rollback() (err error) { for _, appender := range f.secondaries { rollbackErr := appender.Rollback() - if err == nil { + switch { + case err == nil: err = rollbackErr - } else if rollbackErr != nil { + case rollbackErr != nil: level.Error(f.logger).Log("msg", "Squashed rollback error on rollback", "err", rollbackErr) } } diff --git a/storage/merge.go b/storage/merge.go index 193a025227..c0665d720b 100644 --- a/storage/merge.go +++ b/storage/merge.go @@ -197,13 +197,14 @@ func mergeStrings(a, b []string) []string { res := make([]string, 0, maxl*10/9) for len(a) > 0 && len(b) > 0 { - if a[0] == b[0] { + switch { + case a[0] == b[0]: res = append(res, a[0]) a, b = a[1:], b[1:] - } else if a[0] < b[0] { + case a[0] < b[0]: res = append(res, a[0]) a = a[1:] - } else { + default: res = append(res, b[0]) b = b[1:] } diff --git a/storage/remote/codec.go b/storage/remote/codec.go index bfbd08d24b..2ceed4de11 100644 --- a/storage/remote/codec.go +++ b/storage/remote/codec.go @@ -291,13 +291,14 @@ func MergeLabels(primary, secondary []prompb.Label) 
[]prompb.Label { result := make([]prompb.Label, 0, len(primary)+len(secondary)) i, j := 0, 0 for i < len(primary) && j < len(secondary) { - if primary[i].Name < secondary[j].Name { + switch { + case primary[i].Name < secondary[j].Name: result = append(result, primary[i]) i++ - } else if primary[i].Name > secondary[j].Name { + case primary[i].Name > secondary[j].Name: result = append(result, secondary[j]) j++ - } else { + default: result = append(result, primary[i]) i++ j++ @@ -429,7 +430,8 @@ func (c *concreteSeriesIterator) Seek(t int64) chunkenc.ValueType { return c.series.histograms[n+c.histogramsCur].Timestamp >= t }) - if c.floatsCur < len(c.series.floats) && c.histogramsCur < len(c.series.histograms) { + switch { + case c.floatsCur < len(c.series.floats) && c.histogramsCur < len(c.series.histograms): // If float samples and histogram samples have overlapping timestamps prefer the float samples. if c.series.floats[c.floatsCur].Timestamp <= c.series.histograms[c.histogramsCur].Timestamp { c.curValType = chunkenc.ValFloat @@ -445,9 +447,9 @@ func (c *concreteSeriesIterator) Seek(t int64) chunkenc.ValueType { c.floatsCur-- } } - } else if c.floatsCur < len(c.series.floats) { + case c.floatsCur < len(c.series.floats): c.curValType = chunkenc.ValFloat - } else if c.histogramsCur < len(c.series.histograms) { + case c.histogramsCur < len(c.series.histograms): c.curValType = getHistogramValType(&c.series.histograms[c.histogramsCur]) } @@ -515,18 +517,19 @@ func (c *concreteSeriesIterator) Next() chunkenc.ValueType { } c.curValType = chunkenc.ValNone - if peekFloatTS < peekHistTS { + switch { + case peekFloatTS < peekHistTS: c.floatsCur++ c.curValType = chunkenc.ValFloat - } else if peekHistTS < peekFloatTS { + case peekHistTS < peekFloatTS: c.histogramsCur++ c.curValType = chunkenc.ValHistogram - } else if peekFloatTS == noTS && peekHistTS == noTS { + case peekFloatTS == noTS && peekHistTS == noTS: // This only happens when the iterator is exhausted; we set the 
cursors off the end to prevent // Seek() from returning anything afterwards. c.floatsCur = len(c.series.floats) c.histogramsCur = len(c.series.histograms) - } else { + default: // Prefer float samples to histogram samples if there's a conflict. We advance the cursor for histograms // anyway otherwise the histogram sample will get selected on the next call to Next(). c.floatsCur++ diff --git a/storage/remote/ewma.go b/storage/remote/ewma.go index c7fb0289b0..ea4472c494 100644 --- a/storage/remote/ewma.go +++ b/storage/remote/ewma.go @@ -55,9 +55,10 @@ func (r *ewmaRate) tick() { r.mutex.Lock() defer r.mutex.Unlock() - if r.init { + switch { + case r.init: r.lastRate += r.alpha * (instantRate - r.lastRate) - } else if newEvents > 0 { + case newEvents > 0: r.init = true r.lastRate = instantRate } diff --git a/storage/remote/queue_manager.go b/storage/remote/queue_manager.go index 62bd17a66d..10fb6d153d 100644 --- a/storage/remote/queue_manager.go +++ b/storage/remote/queue_manager.go @@ -1030,9 +1030,10 @@ func (t *QueueManager) calculateDesiredShards() int { return t.numShards } - if numShards > t.cfg.MaxShards { + switch { + case numShards > t.cfg.MaxShards: numShards = t.cfg.MaxShards - } else if numShards < t.cfg.MinShards { + case numShards < t.cfg.MinShards: numShards = t.cfg.MinShards } return numShards @@ -1575,10 +1576,11 @@ func sendWriteRequestWithBackoff(ctx context.Context, cfg config.QueueConfig, l } sleepDuration = backoff - if backoffErr.retryAfter > 0 { + switch { + case backoffErr.retryAfter > 0: sleepDuration = backoffErr.retryAfter level.Info(l).Log("msg", "Retrying after duration specified by Retry-After header", "duration", sleepDuration) - } else if backoffErr.retryAfter < 0 { + case backoffErr.retryAfter < 0: level.Debug(l).Log("msg", "retry-after cannot be in past, retrying using default backoff mechanism") } diff --git a/tsdb/agent/db.go b/tsdb/agent/db.go index cb075f3060..3343ee18ef 100644 --- a/tsdb/agent/db.go +++ b/tsdb/agent/db.go @@ 
-951,7 +951,8 @@ func (a *appender) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t int return 0, storage.ErrOutOfOrderSample } - if h != nil { + switch { + case h != nil: // NOTE: always modify pendingHistograms and histogramSeries together a.pendingHistograms = append(a.pendingHistograms, record.RefHistogramSample{ Ref: series.ref, @@ -959,7 +960,7 @@ func (a *appender) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t int H: h, }) a.histogramSeries = append(a.histogramSeries, series) - } else if fh != nil { + case fh != nil: // NOTE: always modify pendingFloatHistograms and floatHistogramSeries together a.pendingFloatHistograms = append(a.pendingFloatHistograms, record.RefFloatHistogramSample{ Ref: series.ref, diff --git a/tsdb/chunkenc/xor.go b/tsdb/chunkenc/xor.go index ba2d96d36b..133ed9952a 100644 --- a/tsdb/chunkenc/xor.go +++ b/tsdb/chunkenc/xor.go @@ -164,14 +164,15 @@ func (a *xorAppender) Append(t int64, v float64) { var tDelta uint64 num := binary.BigEndian.Uint16(a.b.bytes()) - if num == 0 { + switch num { + case 0: buf := make([]byte, binary.MaxVarintLen64) for _, b := range buf[:binary.PutVarint(buf, t)] { a.b.writeByte(b) } a.b.writeBits(math.Float64bits(v), 64) - } else if num == 1 { + case 1: tDelta = uint64(t - a.t) buf := make([]byte, binary.MaxVarintLen64) @@ -181,7 +182,7 @@ func (a *xorAppender) Append(t int64, v float64) { a.writeVDelta(v) - } else { + default: tDelta = uint64(t - a.t) dod := int64(tDelta - a.tDelta) diff --git a/tsdb/chunks/head_chunks.go b/tsdb/chunks/head_chunks.go index a7ff90475e..bcdab21254 100644 --- a/tsdb/chunks/head_chunks.go +++ b/tsdb/chunks/head_chunks.go @@ -999,9 +999,10 @@ func (cdm *ChunkDiskMapper) DeleteCorrupted(originalErr error) error { cdm.readPathMtx.RLock() lastSeq := 0 for seg := range cdm.mmappedChunkFiles { - if seg >= cerr.FileIndex { + switch { + case seg >= cerr.FileIndex: segs = append(segs, seg) - } else if seg > lastSeq { + case seg > lastSeq: lastSeq = seg } } diff --git 
a/tsdb/db.go b/tsdb/db.go index 659251c3ca..45c0771b1c 100644 --- a/tsdb/db.go +++ b/tsdb/db.go @@ -963,10 +963,11 @@ func (db *DB) ApplyConfig(conf *config.Config) error { // Create WBL if it was not present and if OOO is enabled with WAL enabled. var wblog *wlog.WL var err error - if db.head.wbl != nil { + switch { + case db.head.wbl != nil: // The existing WBL from the disk might have been replayed while OOO was disabled. wblog = db.head.wbl - } else if !db.oooWasEnabled.Load() && oooTimeWindow > 0 && db.opts.WALSegmentSize >= 0 { + case !db.oooWasEnabled.Load() && oooTimeWindow > 0 && db.opts.WALSegmentSize >= 0: segmentSize := wlog.DefaultSegmentSize // Wal is set to a custom size. if db.opts.WALSegmentSize > 0 { @@ -1532,10 +1533,11 @@ func (db *DB) deleteBlocks(blocks map[ulid.ULID]*Block) error { } toDelete := filepath.Join(db.dir, ulid.String()) - if _, err := os.Stat(toDelete); os.IsNotExist(err) { + switch _, err := os.Stat(toDelete); { + case os.IsNotExist(err): // Noop. continue - } else if err != nil { + case err != nil: return errors.Wrapf(err, "stat dir %v", toDelete) } diff --git a/tsdb/head_append.go b/tsdb/head_append.go index 6bc91ae06d..86cb097513 100644 --- a/tsdb/head_append.go +++ b/tsdb/head_append.go @@ -344,9 +344,10 @@ func (a *headAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64 } if value.IsStaleNaN(v) { - if s.lastHistogramValue != nil { + switch { + case s.lastHistogramValue != nil: return a.AppendHistogram(ref, lset, t, &histogram.Histogram{Sum: v}, nil) - } else if s.lastFloatHistogramValue != nil { + case s.lastFloatHistogramValue != nil: return a.AppendHistogram(ref, lset, t, nil, &histogram.FloatHistogram{Sum: v}) } } @@ -552,9 +553,10 @@ func (a *headAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels return 0, err } if created { - if h != nil { + switch { + case h != nil: s.lastHistogramValue = &histogram.Histogram{} - } else if fh != nil { + case fh != nil: s.lastFloatHistogramValue = 
&histogram.FloatHistogram{} } a.series = append(a.series, record.RefSeries{ @@ -564,7 +566,8 @@ func (a *headAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels } } - if h != nil { + switch { + case h != nil: s.Lock() if err := s.appendableHistogram(t, h); err != nil { s.Unlock() @@ -581,7 +584,7 @@ func (a *headAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels H: h, }) a.histogramSeries = append(a.histogramSeries, s) - } else if fh != nil { + case fh != nil: s.Lock() if err := s.appendableFloatHistogram(t, fh); err != nil { s.Unlock() @@ -938,7 +941,10 @@ func (a *headAppender) Commit() (err error) { var ok, chunkCreated bool - if err == nil && oooSample { + switch { + case err != nil: + // Do nothing here. + case oooSample: // Sample is OOO and OOO handling is enabled // and the delta is within the OOO tolerance. var mmapRef chunks.ChunkDiskMapperRef @@ -976,7 +982,7 @@ func (a *headAppender) Commit() (err error) { // TODO(codesome): Add error reporting? It depends on addressing https://github.com/prometheus/prometheus/discussions/10305. samplesAppended-- } - } else if err == nil { + default: ok, chunkCreated = series.append(s.T, s.V, a.appendID, a.head.chunkDiskMapper, chunkRange) if ok { if s.T < inOrderMint { @@ -1177,14 +1183,15 @@ func (s *memSeries) appendHistogram(t int64, h *histogram.Histogram, appendID ui app.RecodeHistogram(h, pBackwardInserts, nBackwardInserts) } // We have 3 cases here - // - !okToAppend -> We need to cut a new chunk. + // - !okToAppend or counterReset -> We need to cut a new chunk. // - okToAppend but we have inserts → Existing chunk needs // recoding before we can append our histogram. // - okToAppend and no inserts → Chunk is ready to support our histogram. 
- if !okToAppend || counterReset { + switch { + case !okToAppend || counterReset: c = s.cutNewHeadChunk(t, chunkenc.EncHistogram, chunkDiskMapper, chunkRange) chunkCreated = true - } else if len(pForwardInserts) > 0 || len(nForwardInserts) > 0 { + case len(pForwardInserts) > 0 || len(nForwardInserts) > 0: // New buckets have appeared. We need to recode all // prior histogram samples within the chunk before we // can process this one. @@ -1270,14 +1277,15 @@ func (s *memSeries) appendFloatHistogram(t int64, fh *histogram.FloatHistogram, app.RecodeHistogramm(fh, pBackwardInserts, nBackwardInserts) } // We have 3 cases here - // - !okToAppend -> We need to cut a new chunk. + // - !okToAppend or counterReset -> We need to cut a new chunk. // - okToAppend but we have inserts → Existing chunk needs // recoding before we can append our histogram. // - okToAppend and no inserts → Chunk is ready to support our histogram. - if !okToAppend || counterReset { + switch { + case !okToAppend || counterReset: c = s.cutNewHeadChunk(t, chunkenc.EncFloatHistogram, chunkDiskMapper, chunkRange) chunkCreated = true - } else if len(pForwardInserts) > 0 || len(nForwardInserts) > 0 { + case len(pForwardInserts) > 0 || len(nForwardInserts) > 0: // New buckets have appeared. We need to recode all // prior histogram samples within the chunk before we // can process this one. 
diff --git a/tsdb/head_read.go b/tsdb/head_read.go index 9c40bcd7a8..9c546ab164 100644 --- a/tsdb/head_read.go +++ b/tsdb/head_read.go @@ -424,7 +424,8 @@ func (s *memSeries) oooMergedChunk(meta chunks.Meta, cdm *chunks.ChunkDiskMapper break } - if chunkRef == meta.OOOLastRef { + switch { + case chunkRef == meta.OOOLastRef: tmpChks = append(tmpChks, chunkMetaAndChunkDiskMapperRef{ meta: chunks.Meta{ MinTime: meta.OOOLastMinTime, @@ -435,7 +436,7 @@ func (s *memSeries) oooMergedChunk(meta chunks.Meta, cdm *chunks.ChunkDiskMapper origMinT: c.minTime, origMaxT: c.maxTime, }) - } else if c.OverlapsClosedInterval(mint, maxt) { + case c.OverlapsClosedInterval(mint, maxt): tmpChks = append(tmpChks, chunkMetaAndChunkDiskMapperRef{ meta: chunks.Meta{ MinTime: c.minTime, @@ -594,12 +595,14 @@ type boundedIterator struct { func (b boundedIterator) Next() chunkenc.ValueType { for b.Iterator.Next() == chunkenc.ValFloat { t, _ := b.Iterator.At() - if t < b.minT { + switch { + case t < b.minT: continue - } else if t > b.maxT { + case t > b.maxT: return chunkenc.ValNone + default: + return chunkenc.ValFloat } - return chunkenc.ValFloat } return chunkenc.ValNone } diff --git a/tsdb/head_test.go b/tsdb/head_test.go index 39bcf4c78b..a183e99c09 100644 --- a/tsdb/head_test.go +++ b/tsdb/head_test.go @@ -2960,10 +2960,11 @@ func TestAppendHistogram(t *testing.T) { actHistograms := make([]tsdbutil.Sample, 0, len(expHistograms)) actFloatHistograms := make([]tsdbutil.Sample, 0, len(expFloatHistograms)) for typ := it.Next(); typ != chunkenc.ValNone; typ = it.Next() { - if typ == chunkenc.ValHistogram { + switch typ { + case chunkenc.ValHistogram: ts, h := it.AtHistogram() actHistograms = append(actHistograms, sample{t: ts, h: h}) - } else if typ == chunkenc.ValFloatHistogram { + case chunkenc.ValFloatHistogram: ts, fh := it.AtFloatHistogram() actFloatHistograms = append(actFloatHistograms, sample{t: ts, fh: fh}) } @@ -3565,14 +3566,15 @@ func testHistogramStaleSampleHelper(t *testing.T, 
floatHistogram bool) { for i, eh := range expHistograms { ah := actHistograms[i] if floatHistogram { - if value.IsStaleNaN(eh.fh.Sum) { + switch { + case value.IsStaleNaN(eh.fh.Sum): actNumStale++ require.True(t, value.IsStaleNaN(ah.fh.Sum)) // To make require.Equal work. ah.fh.Sum = 0 eh.fh = eh.fh.Copy() eh.fh.Sum = 0 - } else if i > 0 { + case i > 0: prev := expHistograms[i-1] if prev.fh == nil || value.IsStaleNaN(prev.fh.Sum) { eh.fh.CounterResetHint = histogram.UnknownCounterReset @@ -3580,14 +3582,15 @@ func testHistogramStaleSampleHelper(t *testing.T, floatHistogram bool) { } require.Equal(t, eh, ah) } else { - if value.IsStaleNaN(eh.h.Sum) { + switch { + case value.IsStaleNaN(eh.h.Sum): actNumStale++ require.True(t, value.IsStaleNaN(ah.h.Sum)) // To make require.Equal work. ah.h.Sum = 0 eh.h = eh.h.Copy() eh.h.Sum = 0 - } else if i > 0 { + case i > 0: prev := expHistograms[i-1] if prev.h == nil || value.IsStaleNaN(prev.h.Sum) { eh.h.CounterResetHint = histogram.UnknownCounterReset @@ -4488,19 +4491,19 @@ func TestHistogramValidation(t *testing.T) { for testName, tc := range tests { t.Run(testName, func(t *testing.T) { - err := ValidateHistogram(tc.h) - if tc.errMsg != "" { + switch err := ValidateHistogram(tc.h); { + case tc.errMsg != "": require.ErrorContains(t, err, tc.errMsg) - } else { + default: require.NoError(t, err) } - err = ValidateFloatHistogram(tc.h.ToFloat()) - if tc.errMsgFloat != "" { + switch err := ValidateFloatHistogram(tc.h.ToFloat()); { + case tc.errMsgFloat != "": require.ErrorContains(t, err, tc.errMsgFloat) - } else if tc.errMsg != "" { + case tc.errMsg != "": require.ErrorContains(t, err, tc.errMsg) - } else { + default: require.NoError(t, err) } }) diff --git a/tsdb/index/postings.go b/tsdb/index/postings.go index 15df374fc5..514775210e 100644 --- a/tsdb/index/postings.go +++ b/tsdb/index/postings.go @@ -565,12 +565,11 @@ func newMergedPostings(p []Postings) (m *mergedPostings, nonEmpty bool) { for _, it := range p { // NOTE: 
mergedPostings struct requires the user to issue an initial Next. - if it.Next() { + switch { + case it.Next(): ph = append(ph, it) - } else { - if it.Err() != nil { - return &mergedPostings{err: it.Err()}, true - } + case it.Err() != nil: + return &mergedPostings{err: it.Err()}, true } } @@ -704,16 +703,16 @@ func (rp *removedPostings) Next() bool { return true } - fcur, rcur := rp.full.At(), rp.remove.At() - if fcur < rcur { + switch fcur, rcur := rp.full.At(), rp.remove.At(); { + case fcur < rcur: rp.cur = fcur rp.fok = rp.full.Next() return true - } else if rcur < fcur { + case rcur < fcur: // Forward the remove postings to the right position. rp.rok = rp.remove.Seek(fcur) - } else { + default: // Skip the current posting. rp.fok = rp.full.Next() } @@ -848,9 +847,10 @@ func (it *bigEndianPostings) Err() error { func FindIntersectingPostings(p Postings, candidates []Postings) (indexes []int, err error) { h := make(postingsWithIndexHeap, 0, len(candidates)) for idx, it := range candidates { - if it.Next() { + switch { + case it.Next(): h = append(h, postingsWithIndex{index: idx, p: it}) - } else if it.Err() != nil { + case it.Err() != nil: return nil, it.Err() } } diff --git a/tsdb/ooo_head_read.go b/tsdb/ooo_head_read.go index fd16c6ca8f..8ba3ea39af 100644 --- a/tsdb/ooo_head_read.go +++ b/tsdb/ooo_head_read.go @@ -123,7 +123,7 @@ func (oh *OOOHeadIndexReader) series(ref storage.SeriesRef, builder *labels.Scra } } - // There is nothing to do if we did not collect any chunk + // There is nothing to do if we did not collect any chunk. if len(tmpChks) == 0 { return nil } @@ -136,14 +136,15 @@ func (oh *OOOHeadIndexReader) series(ref storage.SeriesRef, builder *labels.Scra // chunks Meta the first chunk that overlaps with others. 
// Example chunks of a series: 5:(100, 200) 6:(500, 600) 7:(150, 250) 8:(550, 650) // In the example 5 overlaps with 7 and 6 overlaps with 8 so we only want to - // to return chunk Metas for chunk 5 and chunk 6 + // to return chunk Metas for chunk 5 and chunk 6e *chks = append(*chks, tmpChks[0]) - maxTime := tmpChks[0].MaxTime // tracks the maxTime of the previous "to be merged chunk" + maxTime := tmpChks[0].MaxTime // Tracks the maxTime of the previous "to be merged chunk". for _, c := range tmpChks[1:] { - if c.MinTime > maxTime { + switch { + case c.MinTime > maxTime: *chks = append(*chks, c) maxTime = c.MaxTime - } else if c.MaxTime > maxTime { + case c.MaxTime > maxTime: maxTime = c.MaxTime (*chks)[len(*chks)-1].MaxTime = c.MaxTime } diff --git a/tsdb/querier.go b/tsdb/querier.go index f3852d517b..abddebb870 100644 --- a/tsdb/querier.go +++ b/tsdb/querier.go @@ -239,18 +239,20 @@ func PostingsForMatchers(ix IndexReader, ms ...*labels.Matcher) (index.Postings, } for _, m := range ms { - if m.Name == "" && m.Value == "" { // Special-case for AllPostings, used in tests at least. + switch { + case m.Name == "" && m.Value == "": // Special-case for AllPostings, used in tests at least. k, v := index.AllPostingsKey() allPostings, err := ix.Postings(k, v) if err != nil { return nil, err } its = append(its, allPostings) - } else if labelMustBeSet[m.Name] { + case labelMustBeSet[m.Name]: // If this matcher must be non-empty, we can be smarter. matchesEmpty := m.Matches("") isNot := m.Type == labels.MatchNotEqual || m.Type == labels.MatchNotRegexp - if isNot && matchesEmpty { // l!="foo" + switch { + case isNot && matchesEmpty: // l!="foo" // If the label can't be empty and is a Not and the inner matcher // doesn't match empty, then subtract it out at the end. 
inverse, err := m.Inverse() @@ -263,7 +265,7 @@ func PostingsForMatchers(ix IndexReader, ms ...*labels.Matcher) (index.Postings, return nil, err } notIts = append(notIts, it) - } else if isNot && !matchesEmpty { // l!="" + case isNot && !matchesEmpty: // l!="" // If the label can't be empty and is a Not, but the inner matcher can // be empty we need to use inversePostingsForMatcher. inverse, err := m.Inverse() @@ -279,7 +281,7 @@ func PostingsForMatchers(ix IndexReader, ms ...*labels.Matcher) (index.Postings, return index.EmptyPostings(), nil } its = append(its, it) - } else { // l="a" + default: // l="a" // Non-Not matcher, use normal postingsForMatcher. it, err := postingsForMatcher(ix, m) if err != nil { @@ -290,7 +292,7 @@ func PostingsForMatchers(ix IndexReader, ms ...*labels.Matcher) (index.Postings, } its = append(its, it) } - } else { // l="" + default: // l="" // If the matchers for a labelname selects an empty value, it selects all // the series which don't have the label name set too. See: // https://github.com/prometheus/prometheus/issues/3575 and @@ -966,23 +968,24 @@ func (m *mergedStringIter) Next() bool { return false } - if !m.aok { + switch { + case !m.aok: m.cur = m.b.At() m.bok = m.b.Next() m.err = m.b.Err() - } else if !m.bok { + case !m.bok: m.cur = m.a.At() m.aok = m.a.Next() m.err = m.a.Err() - } else if m.b.At() > m.a.At() { + case m.b.At() > m.a.At(): m.cur = m.a.At() m.aok = m.a.Next() m.err = m.a.Err() - } else if m.a.At() > m.b.At() { + case m.a.At() > m.b.At(): m.cur = m.b.At() m.bok = m.b.Next() m.err = m.b.Err() - } else { // Equal. + default: // Equal. 
m.cur = m.b.At() m.aok = m.a.Next() m.err = m.a.Err() diff --git a/tsdb/tombstones/tombstones.go b/tsdb/tombstones/tombstones.go index f7e2a2a1e7..a52e1caa97 100644 --- a/tsdb/tombstones/tombstones.go +++ b/tsdb/tombstones/tombstones.go @@ -190,9 +190,10 @@ type Stone struct { func ReadTombstones(dir string) (Reader, int64, error) { b, err := os.ReadFile(filepath.Join(dir, TombstonesFilename)) - if os.IsNotExist(err) { + switch { + case os.IsNotExist(err): return NewMemTombstones(), 0, nil - } else if err != nil { + case err != nil: return nil, 0, err } diff --git a/tsdb/wal.go b/tsdb/wal.go index 93d445a126..0c57865e66 100644 --- a/tsdb/wal.go +++ b/tsdb/wal.go @@ -522,9 +522,10 @@ func (w *SegmentWAL) openSegmentFile(name string) (*os.File, error) { } }() - if n, err := f.Read(metab); err != nil { + switch n, err := f.Read(metab); { + case err != nil: return nil, errors.Wrapf(err, "validate meta %q", f.Name()) - } else if n != 8 { + case n != 8: return nil, errors.Errorf("invalid header size %d in %q", n, f.Name()) } @@ -1063,9 +1064,10 @@ func (r *walReader) entry(cr io.Reader) (WALEntryType, byte, []byte, error) { tr := io.TeeReader(cr, r.crc32) b := make([]byte, 6) - if n, err := tr.Read(b); err != nil { + switch n, err := tr.Read(b); { + case err != nil: return 0, 0, nil, err - } else if n != 6 { + case n != 6: return 0, 0, nil, r.corruptionErr("invalid entry header size %d", n) } @@ -1087,15 +1089,17 @@ func (r *walReader) entry(cr io.Reader) (WALEntryType, byte, []byte, error) { } buf := r.buf[:length] - if n, err := tr.Read(buf); err != nil { + switch n, err := tr.Read(buf); { + case err != nil: return 0, 0, nil, err - } else if n != length { + case n != length: return 0, 0, nil, r.corruptionErr("invalid entry body size %d", n) } - if n, err := cr.Read(b[:4]); err != nil { + switch n, err := cr.Read(b[:4]); { + case err != nil: return 0, 0, nil, err - } else if n != 4 { + case n != 4: return 0, 0, nil, r.corruptionErr("invalid checksum length %d", n) } if 
exp, has := binary.BigEndian.Uint32(b[:4]), r.crc32.Sum32(); has != exp { diff --git a/tsdb/wlog/live_reader.go b/tsdb/wlog/live_reader.go index 29467aef4a..0ca69093a5 100644 --- a/tsdb/wlog/live_reader.go +++ b/tsdb/wlog/live_reader.go @@ -126,9 +126,10 @@ func (r *LiveReader) Next() bool { // we return EOF and the user can try again later. If we have a full // page, buildRecord is guaranteed to return a record or a non-EOF; it // has checks the records fit in pages. - if ok, err := r.buildRecord(); ok { + switch ok, err := r.buildRecord(); { + case ok: return true - } else if err != nil && err != io.EOF { + case err != nil && err != io.EOF: r.err = err return false } diff --git a/tsdb/wlog/watcher.go b/tsdb/wlog/watcher.go index 72121283d8..b0c17dcbac 100644 --- a/tsdb/wlog/watcher.go +++ b/tsdb/wlog/watcher.go @@ -405,9 +405,10 @@ func (w *Watcher) watch(segmentNum int, tail bool) error { // Ignore errors reading to end of segment whilst replaying the WAL. if !tail { - if err != nil && errors.Cause(err) != io.EOF { + switch { + case err != nil && errors.Cause(err) != io.EOF: level.Warn(w.logger).Log("msg", "Ignoring error reading to end of segment, may have dropped data", "err", err) - } else if reader.Offset() != size { + case reader.Offset() != size: level.Warn(w.logger).Log("msg", "Expected to have read whole segment, may have dropped data", "segment", segmentNum, "read", reader.Offset(), "size", size) } return nil @@ -425,9 +426,10 @@ func (w *Watcher) watch(segmentNum int, tail bool) error { // Ignore all errors reading to end of segment whilst replaying the WAL. 
if !tail { - if err != nil && errors.Cause(err) != io.EOF { + switch { + case err != nil && errors.Cause(err) != io.EOF: level.Warn(w.logger).Log("msg", "Ignoring error reading to end of segment, may have dropped data", "segment", segmentNum, "err", err) - } else if reader.Offset() != size { + case reader.Offset() != size: level.Warn(w.logger).Log("msg", "Expected to have read whole segment, may have dropped data", "segment", segmentNum, "read", reader.Offset(), "size", size) } return nil diff --git a/util/treecache/treecache.go b/util/treecache/treecache.go index acdd6f7bea..bece9d5c83 100644 --- a/util/treecache/treecache.go +++ b/util/treecache/treecache.go @@ -176,11 +176,11 @@ func (tc *ZookeeperTreeCache) loop(path string) { node = childNode } - err := tc.recursiveNodeUpdate(ev.Path, node) - if err != nil { + switch err := tc.recursiveNodeUpdate(ev.Path, node); { + case err != nil: level.Error(tc.logger).Log("msg", "Error during processing of Zookeeper event", "err", err) failure() - } else if tc.head.data == nil { + case tc.head.data == nil: level.Error(tc.logger).Log("msg", "Error during processing of Zookeeper event", "err", "path no longer exists", "path", tc.prefix) failure() } @@ -214,13 +214,14 @@ func (tc *ZookeeperTreeCache) loop(path string) { func (tc *ZookeeperTreeCache) recursiveNodeUpdate(path string, node *zookeeperTreeCacheNode) error { data, _, dataWatcher, err := tc.conn.GetW(path) - if errors.Is(err, zk.ErrNoNode) { + switch { + case errors.Is(err, zk.ErrNoNode): tc.recursiveDelete(path, node) if node == tc.head { return fmt.Errorf("path %s does not exist", path) } return nil - } else if err != nil { + case err != nil: return err } @@ -230,10 +231,11 @@ func (tc *ZookeeperTreeCache) recursiveNodeUpdate(path string, node *zookeeperTr } children, _, childWatcher, err := tc.conn.ChildrenW(path) - if errors.Is(err, zk.ErrNoNode) { + switch { + case errors.Is(err, zk.ErrNoNode): tc.recursiveDelete(path, node) return nil - } else if err != nil { 
+ case err != nil: return err } diff --git a/web/api/v1/api.go b/web/api/v1/api.go index 0624cf2d8b..f4d6d27da1 100644 --- a/web/api/v1/api.go +++ b/web/api/v1/api.go @@ -989,12 +989,14 @@ func (api *API) targets(r *http.Request) apiFuncResult { ScrapeURL: target.URL().String(), GlobalURL: globalURL.String(), LastError: func() string { - if err == nil && lastErrStr == "" { + switch { + case err == nil && lastErrStr == "": return "" - } else if err != nil { + case err != nil: return errors.Wrapf(err, lastErrStr).Error() + default: + return lastErrStr } - return lastErrStr }(), LastScrape: target.LastScrape(), LastScrapeDuration: target.LastScrapeDuration().Seconds(), diff --git a/web/federate_test.go b/web/federate_test.go index 76d4b9cf67..61ef62f46d 100644 --- a/web/federate_test.go +++ b/web/federate_test.go @@ -388,13 +388,13 @@ func TestFederationWithNativeHistograms(t *testing.T) { break } require.NoError(t, err) - if et == textparse.EntryHelp { - metricFamilies++ - } if et == textparse.EntryHistogram || et == textparse.EntrySeries { p.Metric(&l) } - if et == textparse.EntryHistogram { + switch et { + case textparse.EntryHelp: + metricFamilies++ + case textparse.EntryHistogram: _, parsedTimestamp, h, fh := p.Histogram() require.Nil(t, h) actVec = append(actVec, promql.Sample{ @@ -402,7 +402,7 @@ func TestFederationWithNativeHistograms(t *testing.T) { H: fh, Metric: l, }) - } else if et == textparse.EntrySeries { + case textparse.EntrySeries: _, parsedTimestamp, f := p.Series() actVec = append(actVec, promql.Sample{ T: *parsedTimestamp, From 1f044154940dfc380e9ce6dd70bae3dc772af14c Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Tue, 18 Apr 2023 11:14:54 +0200 Subject: [PATCH 12/26] Update many Go dependencies Ran `make update-go-deps` then hand-edited to remove any downgrades. Then backed out changes to: * Azure: still waiting someone to test this. * Kubernetes: needs update elsewhere to klog. * kube-openapi: it doesn't compile with previous Go version. 
Signed-off-by: Bryan Boreham --- go.mod | 50 +++++++++++++-------------- go.sum | 104 +++++++++++++++++++++++++++++---------------------------- 2 files changed, 78 insertions(+), 76 deletions(-) diff --git a/go.mod b/go.mod index 92cc3931c7..7b309b03f5 100644 --- a/go.mod +++ b/go.mod @@ -8,46 +8,46 @@ require ( github.com/Azure/go-autorest/autorest/adal v0.9.23 github.com/alecthomas/kingpin/v2 v2.3.2 github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 - github.com/aws/aws-sdk-go v1.44.217 + github.com/aws/aws-sdk-go v1.44.245 github.com/cespare/xxhash/v2 v2.2.0 github.com/dennwc/varint v1.0.0 github.com/digitalocean/godo v1.98.0 - github.com/docker/docker v23.0.1+incompatible + github.com/docker/docker v23.0.4+incompatible github.com/edsrzf/mmap-go v1.1.0 github.com/envoyproxy/go-control-plane v0.11.0 - github.com/envoyproxy/protoc-gen-validate v0.9.1 + github.com/envoyproxy/protoc-gen-validate v0.10.1 github.com/fsnotify/fsnotify v1.6.0 github.com/go-kit/log v0.2.1 github.com/go-logfmt/logfmt v0.6.0 - github.com/go-openapi/strfmt v0.21.3 + github.com/go-openapi/strfmt v0.21.7 github.com/go-zookeeper/zk v1.0.3 github.com/gogo/protobuf v1.3.2 github.com/golang/snappy v0.0.4 - github.com/google/pprof v0.0.0-20230228050547-1710fef4ab10 - github.com/gophercloud/gophercloud v1.2.0 + github.com/google/pprof v0.0.0-20230406165453-00490a63f317 + github.com/gophercloud/gophercloud v1.3.0 github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd github.com/grpc-ecosystem/grpc-gateway v1.16.0 github.com/hashicorp/consul/api v1.20.0 - github.com/hashicorp/nomad/api v0.0.0-20230308192510-48e7d70fcd4b - github.com/hetznercloud/hcloud-go v1.41.0 - github.com/ionos-cloud/sdk-go/v6 v6.1.5 + github.com/hashicorp/nomad/api v0.0.0-20230418003350-3067191c5197 + github.com/hetznercloud/hcloud-go v1.42.0 + github.com/ionos-cloud/sdk-go/v6 v6.1.6 github.com/json-iterator/go v1.1.12 github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b - github.com/linode/linodego 
v1.14.1 + github.com/linode/linodego v1.16.1 github.com/miekg/dns v1.1.53 github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f github.com/oklog/run v1.1.0 github.com/oklog/ulid v1.3.1 - github.com/ovh/go-ovh v1.3.0 + github.com/ovh/go-ovh v1.4.1 github.com/pkg/errors v0.9.1 github.com/prometheus/alertmanager v0.25.0 - github.com/prometheus/client_golang v1.14.0 + github.com/prometheus/client_golang v1.15.0 github.com/prometheus/client_model v0.3.0 github.com/prometheus/common v0.42.0 github.com/prometheus/common/assets v0.2.0 github.com/prometheus/common/sigv4 v0.1.0 github.com/prometheus/exporter-toolkit v0.9.1 - github.com/scaleway/scaleway-sdk-go v1.0.0-beta.14 + github.com/scaleway/scaleway-sdk-go v1.0.0-beta.15 github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749 github.com/stretchr/testify v1.8.2 github.com/vultr/govultr/v2 v2.17.2 @@ -59,18 +59,18 @@ require ( go.opentelemetry.io/otel/sdk v1.14.0 go.opentelemetry.io/otel/trace v1.14.0 go.uber.org/atomic v1.10.0 - go.uber.org/automaxprocs v1.5.1 + go.uber.org/automaxprocs v1.5.2 go.uber.org/goleak v1.2.1 - golang.org/x/net v0.8.0 - golang.org/x/oauth2 v0.6.0 + golang.org/x/net v0.9.0 + golang.org/x/oauth2 v0.7.0 golang.org/x/sync v0.1.0 - golang.org/x/sys v0.6.0 + golang.org/x/sys v0.7.0 golang.org/x/time v0.3.0 - golang.org/x/tools v0.7.0 + golang.org/x/tools v0.8.0 google.golang.org/api v0.114.0 google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 google.golang.org/grpc v1.53.0 - google.golang.org/protobuf v1.29.1 + google.golang.org/protobuf v1.30.0 gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.1 k8s.io/api v0.26.2 @@ -85,7 +85,7 @@ require ( github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect - github.com/rogpeppe/go-internal v1.9.0 // indirect + github.com/rogpeppe/go-internal v1.10.0 // indirect github.com/xhit/go-str2duration/v2 v2.1.0 // indirect ) @@ -166,16 +166,16 
@@ require ( github.com/pmezard/go-difflib v1.0.0 // indirect github.com/prometheus/procfs v0.9.0 // indirect github.com/spf13/pflag v1.0.5 // indirect - go.mongodb.org/mongo-driver v1.11.2 // indirect + go.mongodb.org/mongo-driver v1.11.3 // indirect go.opencensus.io v0.24.0 // indirect go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.14.0 // indirect go.opentelemetry.io/otel/metric v0.37.0 // indirect go.opentelemetry.io/proto/otlp v0.19.0 // indirect golang.org/x/crypto v0.7.0 // indirect - golang.org/x/exp v0.0.0-20230307190834-24139beb5833 - golang.org/x/mod v0.9.0 // indirect - golang.org/x/term v0.6.0 // indirect - golang.org/x/text v0.8.0 // indirect + golang.org/x/exp v0.0.0-20230321023759-10a507213a29 + golang.org/x/mod v0.10.0 // indirect + golang.org/x/term v0.7.0 // indirect + golang.org/x/text v0.9.0 // indirect google.golang.org/appengine v1.6.7 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect diff --git a/go.sum b/go.sum index 7ea1be034d..c699aa9dff 100644 --- a/go.sum +++ b/go.sum @@ -98,8 +98,8 @@ github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:W github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= -github.com/aws/aws-sdk-go v1.44.217 h1:FcWC56MRl+k756aH3qeMQTylSdeJ58WN0iFz3fkyRz0= -github.com/aws/aws-sdk-go v1.44.217/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= +github.com/aws/aws-sdk-go v1.44.245 h1:KtY2s4q31/kn33AdV63R5t77mdxsI7rq3YT7Mgo805M= +github.com/aws/aws-sdk-go v1.44.245/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= 
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= @@ -154,8 +154,8 @@ github.com/digitalocean/godo v1.98.0/go.mod h1:NRpFznZFvhHjBoqZAaOD3khVzsJ3EibzK github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68= github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v23.0.1+incompatible h1:vjgvJZxprTTE1A37nm+CLNAdwu6xZekyoiVlUZEINcY= -github.com/docker/docker v23.0.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v23.0.4+incompatible h1:Kd3Bh9V/rO+XpTP/BLqM+gx8z7+Yb0AA2Ibj+nNo4ek= +github.com/docker/docker v23.0.4+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -180,8 +180,8 @@ github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go. 
github.com/envoyproxy/go-control-plane v0.11.0 h1:jtLewhRR2vMRNnq2ZZUoCjUlgut+Y0+sDDWPOfwOi1o= github.com/envoyproxy/go-control-plane v0.11.0/go.mod h1:VnHyVMpzcLvCFt9yUz1UnCwHLhwx1WguiVDV7pTG/tI= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/envoyproxy/protoc-gen-validate v0.9.1 h1:PS7VIOgmSVhWUEeZwTe7z7zouA22Cr590PzXKbZHOVY= -github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w= +github.com/envoyproxy/protoc-gen-validate v0.10.1 h1:c0g45+xCJhdgFGw7a5QAfdS4byAbud7miNWJ1WwEVf8= +github.com/envoyproxy/protoc-gen-validate v0.10.1/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss= github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= @@ -245,8 +245,9 @@ github.com/go-openapi/spec v0.20.8 h1:ubHmXNY3FCIOinT8RNrrPfGc9t7I1qhPtdOGoG2AxR github.com/go-openapi/spec v0.20.8/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA= github.com/go-openapi/strfmt v0.21.0/go.mod h1:ZRQ409bWMj+SOgXofQAGTIo2Ebu72Gs+WaRADcS5iNg= github.com/go-openapi/strfmt v0.21.1/go.mod h1:I/XVKeLc5+MM5oPNN7P6urMOpuLXEcNrCX/rPGuWb0k= -github.com/go-openapi/strfmt v0.21.3 h1:xwhj5X6CjXEZZHMWy1zKJxvW9AfHC9pkyUjLvHtKG7o= github.com/go-openapi/strfmt v0.21.3/go.mod h1:k+RzNO0Da+k3FrrynSNN8F7n/peCmQQqbbXjtDfvmGg= +github.com/go-openapi/strfmt v0.21.7 h1:rspiXgNWgeUzhjo1YU01do6qsahtJNByjLVbPLNHb8k= +github.com/go-openapi/strfmt v0.21.7/go.mod h1:adeGTkxE44sPyLk0JV235VQAO/ZXUr8KAzYjclFs3ew= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= github.com/go-openapi/swag v0.21.1/go.mod 
h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= @@ -366,8 +367,8 @@ github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20230228050547-1710fef4ab10 h1:CqYfpuYIjnlNxM3msdyPRKabhXZWbKjf3Q8BWROFBso= -github.com/google/pprof v0.0.0-20230228050547-1710fef4ab10/go.mod h1:79YE0hCXdHag9sBkw2o+N/YnZtTkXi0UT9Nnixa5eYk= +github.com/google/pprof v0.0.0-20230406165453-00490a63f317 h1:hFhpt7CTmR3DX+b4R19ydQFtofxT0Sv3QsKNMVQYTMQ= +github.com/google/pprof v0.0.0-20230406165453-00490a63f317/go.mod h1:79YE0hCXdHag9sBkw2o+N/YnZtTkXi0UT9Nnixa5eYk= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -380,8 +381,8 @@ github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+ github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gax-go/v2 v2.7.1 h1:gF4c0zjUP2H/s/hEGyLA3I0fA2ZWjzYiONAD6cvPr8A= github.com/googleapis/gax-go/v2 v2.7.1/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38/qKbhSAKP6QI= -github.com/gophercloud/gophercloud v1.2.0 h1:1oXyj4g54KBg/kFtCdMM6jtxSzeIyg8wv4z1HoGPp1E= -github.com/gophercloud/gophercloud v1.2.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM= +github.com/gophercloud/gophercloud v1.3.0 h1:RUKyCMiZoQR3VlVR5E3K7PK1AC3/qppsWYo6dtBiqs8= +github.com/gophercloud/gophercloud v1.3.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM= github.com/gopherjs/gopherjs 
v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= @@ -450,13 +451,13 @@ github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/ github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= github.com/hashicorp/memberlist v0.5.0 h1:EtYPN8DpAURiapus508I4n9CzHs2W+8NZGbmmR/prTM= github.com/hashicorp/memberlist v0.5.0/go.mod h1:yvyXLpo0QaGE59Y7hDTsTzDD25JYBZ4mHgHUZ8lrOI0= -github.com/hashicorp/nomad/api v0.0.0-20230308192510-48e7d70fcd4b h1:EkuSTU8c/63q4LMayj8ilgg/4I5PXDFVcnqKfs9qcwI= -github.com/hashicorp/nomad/api v0.0.0-20230308192510-48e7d70fcd4b/go.mod h1:bKUb1ytds5KwUioHdvdq9jmrDqCThv95si0Ub7iNeBg= +github.com/hashicorp/nomad/api v0.0.0-20230418003350-3067191c5197 h1:I5xhKLePXpXgM6pZ4xZNTiurLLS3sGuZrZFFzAbM67A= +github.com/hashicorp/nomad/api v0.0.0-20230418003350-3067191c5197/go.mod h1:2TCrNvonL09r7EiQ6M2rNt+Cmjbn1QbzchFoTWJFpj4= github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY= github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4= -github.com/hetznercloud/hcloud-go v1.41.0 h1:KJGFRRc68QiVu4PrEP5BmCQVveCP2CM26UGQUKGpIUs= -github.com/hetznercloud/hcloud-go v1.41.0/go.mod h1:NaHg47L6C77mngZhwBG652dTAztYrsZ2/iITJKhQkHA= +github.com/hetznercloud/hcloud-go v1.42.0 h1:Es/CDOForQN3nOOP5Vxh1N/YHjpCg386iYEX5zCgi+A= +github.com/hetznercloud/hcloud-go v1.42.0/go.mod h1:YADL8AbmQYH0Eo+1lkuyoc8LutT0UeMvaKP47nNUb+Y= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod 
h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= @@ -464,8 +465,9 @@ github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk= github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= -github.com/ionos-cloud/sdk-go/v6 v6.1.5 h1:BFqThLOgrGJWeo7w6UDyYuNxyi/GqEmNPl7C/YcQ8Fw= -github.com/ionos-cloud/sdk-go/v6 v6.1.5/go.mod h1:EzEgRIDxBELvfoa/uBN0kOQaqovLjUWEB7iW4/Q+t4k= +github.com/ionos-cloud/sdk-go/v6 v6.1.6 h1:0n4irdqNska+1s3YMCRhrAqKbibEgQ7SwwhAlHzYT5A= +github.com/ionos-cloud/sdk-go/v6 v6.1.6/go.mod h1:EzEgRIDxBELvfoa/uBN0kOQaqovLjUWEB7iW4/Q+t4k= +github.com/jarcoal/httpmock v1.3.0 h1:2RJ8GP0IIaWwcC9Fp2BmVi8Kog3v2Hn7VXM3fTd+nuc= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= @@ -513,8 +515,8 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= -github.com/linode/linodego v1.14.1 h1:uGxQyy0BidoEpLGdvfi4cPgEW+0YUFsEGrLEhcTfjNc= -github.com/linode/linodego v1.14.1/go.mod h1:NJlzvlNtdMRRkXb0oN6UWzUkj6t+IBsyveHgZ5Ppjyk= +github.com/linode/linodego v1.16.1 h1:5otq57M4PdHycPERRfSFZ0s1yz1ETVWGjCp3hh7+F9w= +github.com/linode/linodego v1.16.1/go.mod h1:aESRAbpLY9R6IA1WGAWHikRI9DU9Lhesapv1MhKmPHM= 
github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= @@ -543,6 +545,7 @@ github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzp github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/maxatome/go-testdeep v1.12.0 h1:Ql7Go8Tg0C1D/uMMX59LAoYK7LffeJQ6X2T04nTH68g= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= @@ -614,8 +617,8 @@ github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxS github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= -github.com/ovh/go-ovh v1.3.0 h1:mvZaddk4E4kLcXhzb+cxBsMPYp2pHqiQpWYkInsuZPQ= -github.com/ovh/go-ovh v1.3.0/go.mod h1:AxitLZ5HBRPyUd+Zl60Ajaag+rNTdVXWIkzfrVuTXWA= +github.com/ovh/go-ovh v1.4.1 h1:VBGa5wMyQtTP7Zb+w97zRCh9sLtM/2YKRyy+MEJmWaM= +github.com/ovh/go-ovh v1.4.1/go.mod h1:6bL6pPyUT7tBfI0pqOegJgRjgjuO+mOo+MyXd1EEC0M= github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod 
h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= @@ -644,8 +647,8 @@ github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeD github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw= -github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y= +github.com/prometheus/client_golang v1.15.0 h1:5fCgGYogn0hFdhyhLbw7hEsWxufKtY9klyvdNfFlFhM= +github.com/prometheus/client_golang v1.15.0/go.mod h1:e9yaBhRPU2pPNsZwE+JdQl0KEt1N9XgF6zxWmaC0xOk= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -683,16 +686,16 @@ github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6L github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= -github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= +github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= +github.com/rogpeppe/go-internal v1.10.0/go.mod 
h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= -github.com/scaleway/scaleway-sdk-go v1.0.0-beta.14 h1:yFl3jyaSVLNYXlnNYM5z2pagEk1dYQhfr1p20T1NyKY= -github.com/scaleway/scaleway-sdk-go v1.0.0-beta.14/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg= +github.com/scaleway/scaleway-sdk-go v1.0.0-beta.15 h1:Y7xOFbD+3jaPw+VN7lkakNJ/pa+ZSQVFp1ONtJaBxns= +github.com/scaleway/scaleway-sdk-go v1.0.0-beta.15/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= -github.com/shoenig/test v0.6.2 h1:tdq+WGnznwE5xcOMXkqqXuudK75RkSGBazBGcP1lX6w= +github.com/shoenig/test v0.6.3 h1:GVXWJFk9PiOjN0KoJ7VrJGH6uLPnqxR7/fe3HUPfE0c= github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749 h1:bUGsEnyNbVPw06Bs80sCeARAlK8lhwqGyi6UT8ymuGk= github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= @@ -766,8 +769,8 @@ go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mI go.mongodb.org/mongo-driver v1.7.3/go.mod h1:NqaYOwnXWr5Pm7AOpO5QFxKJ503nbMse/R79oO62zWg= go.mongodb.org/mongo-driver v1.7.5/go.mod h1:VXEWRZ6URJIkUq2SCAyapmhH0ZLRBP+FT4xhp5Zvxng= go.mongodb.org/mongo-driver v1.10.0/go.mod h1:wsihk0Kdgv8Kqu1Anit4sfK+22vSFbUrAVEYRhCXrA8= -go.mongodb.org/mongo-driver v1.11.2 h1:+1v2rDQUWNcGW7/7E0Jvdz51V38XXxJfhzbV17aNHCw= -go.mongodb.org/mongo-driver 
v1.11.2/go.mod h1:s7p5vEtfbeR1gYi6pnj3c3/urpbLv2T5Sfd6Rp2HBB8= +go.mongodb.org/mongo-driver v1.11.3 h1:Ql6K6qYHEzB6xvu4+AU0BoRoqf9vFPcc4o7MUIdPW8Y= +go.mongodb.org/mongo-driver v1.11.3/go.mod h1:PTSz5yu21bkT/wXpkS7WR5f0ddqw5quethTUn9WM+2g= go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= @@ -802,8 +805,8 @@ go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= -go.uber.org/automaxprocs v1.5.1 h1:e1YG66Lrk73dn4qhg8WFSvhF0JuFQF0ERIp4rpuV8Qk= -go.uber.org/automaxprocs v1.5.1/go.mod h1:BF4eumQw0P9GtnuxxovUd06vwm1o18oMzFtK66vU6XU= +go.uber.org/automaxprocs v1.5.2 h1:2LxUOGiR3O6tw8ui5sZa2LAaHnsviZdVOUZw4fvbnME= +go.uber.org/automaxprocs v1.5.2/go.mod h1:eRbA25aqJrxAbsLO0xy5jVwPt7FQnRgjW+efnwa1WM0= go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= @@ -839,8 +842,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20230307190834-24139beb5833 h1:SChBja7BCQewoTAU7IgvucQKMIXrEpFxNMs0spT3/5s= -golang.org/x/exp v0.0.0-20230307190834-24139beb5833/go.mod 
h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= +golang.org/x/exp v0.0.0-20230321023759-10a507213a29 h1:ooxPy7fPvB4kwsA2h+iBNHkAbp/4JxTSwCmvdjEYmug= +golang.org/x/exp v0.0.0-20230321023759-10a507213a29/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -862,8 +865,8 @@ golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.9.0 h1:KENHtAZL2y3NLMYZeHY9DW8HW8V+kQyJsY/V9JlKvCs= -golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk= +golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -911,8 +914,8 @@ golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.8.0 
h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ= -golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= +golang.org/x/net v0.9.0 h1:aWJ/m6xSmxWBx+V0XRHTlrYrPG56jKsLdTFmsSsCzOM= +golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -920,8 +923,8 @@ golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4Iltr golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.6.0 h1:Lh8GPgSKBfWSwFvtuWOfeI3aAAnbXTSutYxJiOJFgIw= -golang.org/x/oauth2 v0.6.0/go.mod h1:ycmewcwgD4Rpr3eZJLSB4Kyyljb3qDh40vJ8STE5HKw= +golang.org/x/oauth2 v0.7.0 h1:qe6s0zUXlPX80/dITx3440hWZ7GwMwgDDyrSGTPJG/g= +golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1003,14 +1006,14 @@ golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys 
v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= -golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU= +golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.6.0 h1:clScbb1cHjoCkyRbWwBEUZ5H/tIFu5TAXIqaZD0Gcjw= -golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= +golang.org/x/term v0.7.0 h1:BEvjmm5fURWqcfbSKTdpkDXYBrUS1c0m8agp14W48vQ= +golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1021,8 +1024,8 @@ golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68= -golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= +golang.org/x/text v0.9.0/go.mod 
h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -1082,8 +1085,8 @@ golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.7.0 h1:W4OVu8VVOaIO0yzWMNdepAulS7YfoS3Zabrm8DOXXU4= -golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= +golang.org/x/tools v0.8.0 h1:vSDcovVPld282ceKgDimkRSC8kpaH1dgyc9UMzlt84Y= +golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1183,8 +1186,8 @@ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.29.1 h1:7QBf+IK2gx70Ap/hDsOmam3GE0v9HicjfEdAxE62UoM= -google.golang.org/protobuf v1.29.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= 
+google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= +google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -1198,7 +1201,6 @@ gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMy gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/ini.v1 v1.57.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= From 74e6668e87adc1a0d0833a40f96f7f064c2012ae Mon Sep 17 00:00:00 2001 From: gotjosh Date: Thu, 20 Apr 2023 09:34:05 +0100 Subject: [PATCH 13/26] update docs Signed-off-by: gotjosh --- docs/querying/api.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/querying/api.md b/docs/querying/api.md index 820414fb13..ef7fa54c67 100644 --- a/docs/querying/api.md +++ b/docs/querying/api.md @@ -675,9 +675,9 @@ GET /api/v1/rules URL query parameters: - `type=alert|record`: return only the alerting rules (e.g. `type=alert`) or the recording rules (e.g. `type=record`). When the parameter is absent or empty, no filtering is done. -- `rule_name[]=`: only return rules with the given rule name. If the parameter is repeated, rules with any of provided names are returned. If we've filtered out all the rules of a group, the group is not returned. 
When the parameter is absent or empty, no filtering is done. -- `rule_group[]=`: only return rules with the given rule group name. If the parameter is repeated, rules with any of provided rule group names are returned. When the parameter is absent or empty, no filtering is done. -- `file[]=`: only return rules with the given filepath. If the parameter is repeated, rules with any of provided filepaths are returned. When the parameter is absent or empty, no filtering is done. +- `rule_name[]=`: only return rules with the given rule name. If the parameter is repeated, rules with any of the provided names are returned. If we've filtered out all the rules of a group, the group is not returned. When the parameter is absent or empty, no filtering is done. +- `rule_group[]=`: only return rules with the given rule group name. If the parameter is repeated, rules with any of the provided rule group names are returned. When the parameter is absent or empty, no filtering is done. +- `file[]=`: only return rules with the given filepath. If the parameter is repeated, rules with any of the provided filepaths are returned. When the parameter is absent or empty, no filtering is done. 
```json $ curl http://localhost:9090/api/v1/rules From e78be38cc08231c0a151f4672b3a049f66c83f11 Mon Sep 17 00:00:00 2001 From: gotjosh Date: Thu, 20 Apr 2023 11:20:10 +0100 Subject: [PATCH 14/26] don't show empty groups Signed-off-by: gotjosh --- web/api/v1/api.go | 8 +++++--- web/api/v1/api_test.go | 2 +- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/web/api/v1/api.go b/web/api/v1/api.go index 9a13e09d98..e700f71205 100644 --- a/web/api/v1/api.go +++ b/web/api/v1/api.go @@ -1309,7 +1309,7 @@ func (api *API) rules(r *http.Request) apiFuncResult { fSet := queryFormToSet(r.Form["file[]"]) ruleGroups := api.rulesRetriever(r.Context()).RuleGroups() - res := &RuleDiscovery{RuleGroups: make([]*RuleGroup, len(ruleGroups))} + res := &RuleDiscovery{RuleGroups: make([]*RuleGroup, 0, len(ruleGroups))} typ := strings.ToLower(r.URL.Query().Get("type")) if typ != "" && typ != "alert" && typ != "record" { @@ -1319,7 +1319,8 @@ func (api *API) rules(r *http.Request) apiFuncResult { returnAlerts := typ == "" || typ == "alert" returnRecording := typ == "" || typ == "record" - for i, grp := range ruleGroups { + rgs := make([]*RuleGroup, 0, len(ruleGroups)) + for _, grp := range ruleGroups { if len(rgSet) > 0 { if _, ok := rgSet[grp.Name()]; !ok { continue @@ -1400,9 +1401,10 @@ func (api *API) rules(r *http.Request) apiFuncResult { // If the rule group response has no rules, skip it - this means we filtered all the rules of this group. 
if len(apiRuleGroup.Rules) > 0 { - res.RuleGroups[i] = apiRuleGroup + rgs = append(rgs, apiRuleGroup) } } + res.RuleGroups = rgs return apiFuncResult{res, nil, nil, nil} } diff --git a/web/api/v1/api_test.go b/web/api/v1/api_test.go index c3e1bf59d7..53ea21182d 100644 --- a/web/api/v1/api_test.go +++ b/web/api/v1/api_test.go @@ -2003,7 +2003,7 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E { endpoint: api.rules, query: url.Values{"rule_group[]": []string{"respond-with-nothing"}}, - response: &RuleDiscovery{RuleGroups: []*RuleGroup{nil}}, + response: &RuleDiscovery{RuleGroups: []*RuleGroup{}}, }, { endpoint: api.rules, From 637235f0a6af483ddf45e16c5ad800fee5bb946b Mon Sep 17 00:00:00 2001 From: Julien Pivotto Date: Thu, 20 Apr 2023 16:19:50 +0200 Subject: [PATCH 15/26] Revert type casting removal This reverts the removal of type casting due to an error in the dragonfly integration. The change in the type casting introduced by the commit causes a type mismatch, resulting in the following errors: util/runtime/limits_default.go:42:57: cannot use rlimit.Cur (variable of type int64) as type uint64 in argument to limitToString util/runtime/limits_default.go:42:90: cannot use rlimit.Max (variable of type int64) as type uint64 in argument to limitToString Reverting this commit to resolve the type mismatch error and maintain compatibility with the dragonfly integration. 
Signed-off-by: Julien Pivotto --- util/runtime/limits_default.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/util/runtime/limits_default.go b/util/runtime/limits_default.go index 1588a93c79..cd0ce732f9 100644 --- a/util/runtime/limits_default.go +++ b/util/runtime/limits_default.go @@ -39,7 +39,9 @@ func getLimits(resource int, unit string) string { if err != nil { panic("syscall.Getrlimit failed: " + err.Error()) } - return fmt.Sprintf("(soft=%s, hard=%s)", limitToString(rlimit.Cur, unit), limitToString(rlimit.Max, unit)) + // rlimit.Cur and rlimit.Max are int64 on some platforms, such as dragonfly. + // We need to cast them explicitly to uint64. + return fmt.Sprintf("(soft=%s, hard=%s)", limitToString(uint64(rlimit.Cur), unit), limitToString(uint64(rlimit.Max), unit)) //nolint:unconvert } // FdLimits returns the soft and hard limits for file descriptors. From 7e9acc2e46452da9b259d2253ba6fc8ae9bd7e95 Mon Sep 17 00:00:00 2001 From: Matthieu MOREL Date: Thu, 20 Apr 2023 18:43:51 +0200 Subject: [PATCH 16/26] golangci-lint: remove skip-cache and restore singleCaseSwitch rule Signed-off-by: Matthieu MOREL --- .github/workflows/ci.yml | 1 - .golangci.yml | 3 --- promql/engine.go | 9 +++------ 3 files changed, 3 insertions(+), 10 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 07b1242c28..ee63ee300d 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -150,7 +150,6 @@ jobs: uses: golangci/golangci-lint-action@v3.4.0 with: args: --verbose - skip-cache: true version: v1.51.2 fuzzing: uses: ./.github/workflows/fuzzing.yml diff --git a/.golangci.yml b/.golangci.yml index c0c20d425a..d1cd86ed59 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -24,9 +24,6 @@ issues: - linters: - gocritic text: "appendAssign" - - linters: - - gocritic - text: "singleCaseSwitch" - path: _test.go linters: - errcheck diff --git a/promql/engine.go b/promql/engine.go index 910601a88d..cbeeb82a1a 100644 --- 
a/promql/engine.go +++ b/promql/engine.go @@ -757,8 +757,7 @@ func subqueryTimes(path []parser.Node) (time.Duration, time.Duration, *int64) { ts int64 = math.MaxInt64 ) for _, node := range path { - switch n := node.(type) { - case *parser.SubqueryExpr: + if n, ok := node.(*parser.SubqueryExpr); ok { subqOffset += n.OriginalOffset subqRange += n.Range if n.Timestamp != nil { @@ -847,8 +846,7 @@ func (ng *Engine) getTimeRangesForSelector(s *parser.EvalStmt, n *parser.VectorS func (ng *Engine) getLastSubqueryInterval(path []parser.Node) time.Duration { var interval time.Duration for _, node := range path { - switch n := node.(type) { - case *parser.SubqueryExpr: + if n, ok := node.(*parser.SubqueryExpr); ok { interval = n.Step if n.Step == 0 { interval = time.Duration(ng.noStepSubqueryIntervalFn(durationMilliseconds(n.Range))) * time.Millisecond @@ -914,8 +912,7 @@ func extractGroupsFromPath(p []parser.Node) (bool, []string) { if len(p) == 0 { return false, nil } - switch n := p[len(p)-1].(type) { - case *parser.AggregateExpr: + if n, ok := p[len(p)-1].(*parser.AggregateExpr); ok { return !n.Without, n.Grouping } return false, nil From e9a1e26ab700c1206c749b0b069fff99c8426aac Mon Sep 17 00:00:00 2001 From: Jeanette Tan Date: Sat, 22 Apr 2023 02:27:15 +0800 Subject: [PATCH 17/26] Perform integer/float histogram type checking on conversions, and use a consistent method for determining integer vs float histogram Signed-off-by: Jeanette Tan --- prompb/custom.go | 5 +++++ storage/remote/codec.go | 24 +++++++++++++++--------- storage/remote/codec_test.go | 2 +- storage/remote/queue_manager_test.go | 2 +- storage/remote/write_handler.go | 2 +- storage/remote/write_handler_test.go | 2 +- 6 files changed, 24 insertions(+), 13 deletions(-) diff --git a/prompb/custom.go b/prompb/custom.go index 4b07187bd2..13d6e0f0cd 100644 --- a/prompb/custom.go +++ b/prompb/custom.go @@ -20,6 +20,11 @@ import ( func (m Sample) T() int64 { return m.Timestamp } func (m Sample) V() float64 { 
return m.Value } +func (h Histogram) IsFloatHistogram() bool { + _, ok := h.GetCount().(*Histogram_CountFloat) + return ok +} + func (r *ChunkedReadResponse) PooledMarshal(p *sync.Pool) ([]byte, error) { size := r.Size() data, ok := p.Get().(*[]byte) diff --git a/storage/remote/codec.go b/storage/remote/codec.go index 02c84a3e6c..9a683b908f 100644 --- a/storage/remote/codec.go +++ b/storage/remote/codec.go @@ -455,10 +455,10 @@ func (c *concreteSeriesIterator) Seek(t int64) chunkenc.ValueType { } func getHistogramValType(h *prompb.Histogram) chunkenc.ValueType { - if _, isInt := h.GetCount().(*prompb.Histogram_CountInt); isInt { - return chunkenc.ValHistogram + if h.IsFloatHistogram() { + return chunkenc.ValFloatHistogram } - return chunkenc.ValFloatHistogram + return chunkenc.ValHistogram } // At implements chunkenc.Iterator. @@ -624,9 +624,11 @@ func exemplarProtoToExemplar(ep prompb.Exemplar) exemplar.Exemplar { } // HistogramProtoToHistogram extracts a (normal integer) Histogram from the -// provided proto message. The caller has to make sure that the proto message -// represents an integer histogram and not a float histogram. +// provided proto message. func HistogramProtoToHistogram(hp prompb.Histogram) *histogram.Histogram { + if hp.IsFloatHistogram() { + panic("don't call HistogramProtoToHistogram on a float histogram") + } return &histogram.Histogram{ CounterResetHint: histogram.CounterResetHint(hp.ResetHint), Schema: hp.Schema, @@ -642,9 +644,11 @@ func HistogramProtoToHistogram(hp prompb.Histogram) *histogram.Histogram { } // FloatHistogramProtoToFloatHistogram extracts a float Histogram from the -// provided proto message to a Float Histogram. The caller has to make sure that -// the proto message represents a float histogram and not an integer histogram. +// provided proto message to a Float Histogram. 
func FloatHistogramProtoToFloatHistogram(hp prompb.Histogram) *histogram.FloatHistogram { + if !hp.IsFloatHistogram() { + panic("don't call FloatHistogramProtoToFloatHistogram on an integer histogram") + } return &histogram.FloatHistogram{ CounterResetHint: histogram.CounterResetHint(hp.ResetHint), Schema: hp.Schema, @@ -660,9 +664,11 @@ func FloatHistogramProtoToFloatHistogram(hp prompb.Histogram) *histogram.FloatHi } // HistogramProtoToFloatHistogram extracts and converts a (normal integer) histogram from the provided proto message -// to a float histogram. The caller has to make sure that the proto message represents an integer histogram and not a -// float histogram. +// to a float histogram. func HistogramProtoToFloatHistogram(hp prompb.Histogram) *histogram.FloatHistogram { + if hp.IsFloatHistogram() { + panic("don't call HistogramProtoToFloatHistogram on a float histogram") + } return &histogram.FloatHistogram{ CounterResetHint: histogram.CounterResetHint(hp.ResetHint), Schema: hp.Schema, diff --git a/storage/remote/codec_test.go b/storage/remote/codec_test.go index 27e2cc704d..dbd5cec219 100644 --- a/storage/remote/codec_test.go +++ b/storage/remote/codec_test.go @@ -528,7 +528,7 @@ func TestNilHistogramProto(*testing.T) { // This function will panic if it impromperly handles nil // values, causing the test to fail. 
HistogramProtoToHistogram(prompb.Histogram{}) - FloatHistogramProtoToFloatHistogram(prompb.Histogram{}) + HistogramProtoToFloatHistogram(prompb.Histogram{}) } func exampleHistogram() histogram.Histogram { diff --git a/storage/remote/queue_manager_test.go b/storage/remote/queue_manager_test.go index a57c3bf7b1..b43258ff06 100644 --- a/storage/remote/queue_manager_test.go +++ b/storage/remote/queue_manager_test.go @@ -802,7 +802,7 @@ func (c *TestWriteClient) Store(_ context.Context, req []byte) error { for _, histogram := range ts.Histograms { count++ - if histogram.GetCountFloat() > 0 || histogram.GetZeroCountFloat() > 0 { + if histogram.IsFloatHistogram() { c.receivedFloatHistograms[seriesName] = append(c.receivedFloatHistograms[seriesName], histogram) } else { c.receivedHistograms[seriesName] = append(c.receivedHistograms[seriesName], histogram) diff --git a/storage/remote/write_handler.go b/storage/remote/write_handler.go index 5aa1130888..1f4c43e595 100644 --- a/storage/remote/write_handler.go +++ b/storage/remote/write_handler.go @@ -125,7 +125,7 @@ func (h *writeHandler) write(ctx context.Context, req *prompb.WriteRequest) (err } for _, hp := range ts.Histograms { - if hp.GetCountFloat() > 0 || hp.GetZeroCountFloat() > 0 { // It is a float histogram. + if hp.IsFloatHistogram() { fhs := FloatHistogramProtoToFloatHistogram(hp) _, err = app.AppendHistogram(0, labels, hp.Timestamp, nil, fhs) } else { diff --git a/storage/remote/write_handler_test.go b/storage/remote/write_handler_test.go index e7a88ddc23..3bce5f1d88 100644 --- a/storage/remote/write_handler_test.go +++ b/storage/remote/write_handler_test.go @@ -68,7 +68,7 @@ func TestRemoteWriteHandler(t *testing.T) { } for _, hp := range ts.Histograms { - if hp.GetCountFloat() > 0 || hp.GetZeroCountFloat() > 0 { // It is a float histogram. 
+ if hp.IsFloatHistogram() { fh := FloatHistogramProtoToFloatHistogram(hp) require.Equal(t, mockHistogram{labels, hp.Timestamp, nil, fh}, appendable.histograms[k]) } else { From 1102ffd188b4a9bc16c2c120c3eeb3dc09a92a99 Mon Sep 17 00:00:00 2001 From: Jeanette Tan Date: Sat, 22 Apr 2023 02:27:15 +0800 Subject: [PATCH 18/26] Fix according to code review Signed-off-by: Jeanette Tan --- storage/remote/codec.go | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/storage/remote/codec.go b/storage/remote/codec.go index 9a683b908f..6a58ec4ac6 100644 --- a/storage/remote/codec.go +++ b/storage/remote/codec.go @@ -624,10 +624,11 @@ func exemplarProtoToExemplar(ep prompb.Exemplar) exemplar.Exemplar { } // HistogramProtoToHistogram extracts a (normal integer) Histogram from the -// provided proto message. +// provided proto message. The caller has to make sure that the proto message +// represents an integer histogram and not a float histogram, or it panics. func HistogramProtoToHistogram(hp prompb.Histogram) *histogram.Histogram { if hp.IsFloatHistogram() { - panic("don't call HistogramProtoToHistogram on a float histogram") + panic("HistogramProtoToHistogram called with a float histogram") } return &histogram.Histogram{ CounterResetHint: histogram.CounterResetHint(hp.ResetHint), @@ -644,10 +645,12 @@ func HistogramProtoToHistogram(hp prompb.Histogram) *histogram.Histogram { } // FloatHistogramProtoToFloatHistogram extracts a float Histogram from the -// provided proto message to a Float Histogram. +// provided proto message to a Float Histogram. The caller has to make sure that +// the proto message represents a float histogram and not an integer histogram, +// or it panics. 
func FloatHistogramProtoToFloatHistogram(hp prompb.Histogram) *histogram.FloatHistogram { if !hp.IsFloatHistogram() { - panic("don't call FloatHistogramProtoToFloatHistogram on an integer histogram") + panic("FloatHistogramProtoToFloatHistogram called with an integer histogram") } return &histogram.FloatHistogram{ CounterResetHint: histogram.CounterResetHint(hp.ResetHint), @@ -664,10 +667,11 @@ func FloatHistogramProtoToFloatHistogram(hp prompb.Histogram) *histogram.FloatHi } // HistogramProtoToFloatHistogram extracts and converts a (normal integer) histogram from the provided proto message -// to a float histogram. +// to a float histogram. The caller has to make sure that the proto message represents an integer histogram and not a +// float histogram, or it panics. func HistogramProtoToFloatHistogram(hp prompb.Histogram) *histogram.FloatHistogram { if hp.IsFloatHistogram() { - panic("don't call HistogramProtoToFloatHistogram on a float histogram") + panic("HistogramProtoToFloatHistogram called with a float histogram") } return &histogram.FloatHistogram{ CounterResetHint: histogram.CounterResetHint(hp.ResetHint), From 13938d0ccc74b6e094cdf9cd88b921285e670081 Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Sat, 22 Apr 2023 10:16:49 +0200 Subject: [PATCH 19/26] Update test_golang_oldest to 1.19 Since test_go is on 1.20, and it should use the previous version. Signed-off-by: Bryan Boreham --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index ee63ee300d..bfa1ac00d0 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -66,7 +66,7 @@ jobs: runs-on: ubuntu-latest # The go verson in this image should be N-1 wrt test_go. 
container: - image: quay.io/prometheus/golang-builder:1.18-base + image: quay.io/prometheus/golang-builder:1.19-base steps: - uses: actions/checkout@v3 - run: make build From d281ebb178a9c1b51f74cd61afbdf16c1ca5f1f8 Mon Sep 17 00:00:00 2001 From: Vladimir Varankin Date: Tue, 28 Mar 2023 20:47:18 +0200 Subject: [PATCH 20/26] web: display GOMEMLIMIT in runtime info Signed-off-by: Vladimir Varankin --- web/api/v1/api.go | 1 + web/web.go | 2 ++ 2 files changed, 3 insertions(+) diff --git a/web/api/v1/api.go b/web/api/v1/api.go index c43f4573ab..e1168e1a66 100644 --- a/web/api/v1/api.go +++ b/web/api/v1/api.go @@ -143,6 +143,7 @@ type RuntimeInfo struct { CorruptionCount int64 `json:"corruptionCount"` GoroutineCount int `json:"goroutineCount"` GOMAXPROCS int `json:"GOMAXPROCS"` + GOMEMLIMIT int64 `json:"GOMEMLIMIT"` GOGC string `json:"GOGC"` GODEBUG string `json:"GODEBUG"` StorageRetention string `json:"storageRetention"` diff --git a/web/web.go b/web/web.go index c8a32e0a66..27378b3b81 100644 --- a/web/web.go +++ b/web/web.go @@ -29,6 +29,7 @@ import ( "path" "path/filepath" "runtime" + "runtime/debug" "strconv" "strings" "sync" @@ -710,6 +711,7 @@ func (h *Handler) runtimeInfo() (api_v1.RuntimeInfo, error) { CWD: h.cwd, GoroutineCount: runtime.NumGoroutine(), GOMAXPROCS: runtime.GOMAXPROCS(0), + GOMEMLIMIT: debug.SetMemoryLimit(-1), GOGC: os.Getenv("GOGC"), GODEBUG: os.Getenv("GODEBUG"), } From c06c02b3b10b3d98bd7d59400d0a1b3762e90af3 Mon Sep 17 00:00:00 2001 From: Paschalis Tsilias Date: Tue, 25 Apr 2023 16:27:39 +0300 Subject: [PATCH 21/26] Fix recommended protoc version Signed-off-by: Paschalis Tsilias --- prompb/README.md | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/prompb/README.md b/prompb/README.md index 8c19b17e9d..1b85bc2f3c 100644 --- a/prompb/README.md +++ b/prompb/README.md @@ -4,6 +4,8 @@ re-compile them when building Prometheus. 
If however you have modified the defs and do need to re-compile, run `make proto` from the parent dir. -In order for the script to run, you'll need `protoc` (version 3.12.3) in your -PATH. +In order for the [script][] to run, you'll need `protoc` (version 3.15.8) in +your PATH. + +[script]: ../scripts/genproto.sh From 417e847662e248e2a0213f9ee34cc99326f5f4e0 Mon Sep 17 00:00:00 2001 From: Paschalis Tsilias Date: Tue, 25 Apr 2023 17:17:22 +0300 Subject: [PATCH 22/26] Update prompb/README.md Co-authored-by: Julien Pivotto Signed-off-by: Paschalis Tsilias --- prompb/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/prompb/README.md b/prompb/README.md index 1b85bc2f3c..c7c31399c1 100644 --- a/prompb/README.md +++ b/prompb/README.md @@ -4,7 +4,7 @@ re-compile them when building Prometheus. If however you have modified the defs and do need to re-compile, run `make proto` from the parent dir. -In order for the [script][] to run, you'll need `protoc` (version 3.15.8) in +In order for the [script][../scripts/genproto.sh] to run, you'll need `protoc` (version 3.15.8) in your PATH. [script]: ../scripts/genproto.sh From 38de61b59b38b35fc33b1ef8f9a35f394e733f13 Mon Sep 17 00:00:00 2001 From: Jesus Vazquez Date: Tue, 25 Apr 2023 17:58:42 +0200 Subject: [PATCH 23/26] Propose Jesus Vazquez as 2.45 release shepherd Signed-off-by: Jesus Vazquez --- RELEASE.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/RELEASE.md b/RELEASE.md index 26f72ba628..d7f24dabd5 100644 --- a/RELEASE.md +++ b/RELEASE.md @@ -49,7 +49,8 @@ Release cadence of first pre-releases being cut is 6 weeks. 
| v2.42 | 2023-01-25 | Kemal Akkoyun (GitHub: @kakkoyun) | | v2.43 | 2023-03-08 | Julien Pivotto (GitHub: @roidelapluie) | | v2.44 | 2023-04-19 | Bryan Boreham (GitHub: @bboreham) | -| v2.45 | 2023-05-31 | **searching for volunteer** | +| v2.45 | 2023-05-31 | Jesus Vazquez (Github: @jesusvazquez) | +| v2.46 | 2023-07-12 | **searching for volunteer** | If you are interested in volunteering please create a pull request against the [prometheus/prometheus](https://github.com/prometheus/prometheus) repository and propose yourself for the release series of your choice. From 93f64754c049f05274c4fdbfd20e5054da7c4682 Mon Sep 17 00:00:00 2001 From: Paschalis Tsilias Date: Wed, 26 Apr 2023 12:43:19 +0300 Subject: [PATCH 24/26] Remove extra line Signed-off-by: Paschalis Tsilias --- prompb/README.md | 3 --- 1 file changed, 3 deletions(-) diff --git a/prompb/README.md b/prompb/README.md index c7c31399c1..d71438afb9 100644 --- a/prompb/README.md +++ b/prompb/README.md @@ -6,6 +6,3 @@ If however you have modified the defs and do need to re-compile, run In order for the [script][../scripts/genproto.sh] to run, you'll need `protoc` (version 3.15.8) in your PATH. - -[script]: ../scripts/genproto.sh - From 55626c6911e0ceb87920d94e5a15c8cf6c49d962 Mon Sep 17 00:00:00 2001 From: Paschalis Tsilias Date: Wed, 26 Apr 2023 12:44:15 +0300 Subject: [PATCH 25/26] Fix final newline Signed-off-by: Paschalis Tsilias --- prompb/README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/prompb/README.md b/prompb/README.md index d71438afb9..c1acb508aa 100644 --- a/prompb/README.md +++ b/prompb/README.md @@ -6,3 +6,4 @@ If however you have modified the defs and do need to re-compile, run In order for the [script][../scripts/genproto.sh] to run, you'll need `protoc` (version 3.15.8) in your PATH. 
+ From 8a34c43515fc0b9452ecf4f238c44a9f249d5da3 Mon Sep 17 00:00:00 2001 From: Paschalis Tsilias Date: Wed, 26 Apr 2023 12:57:20 +0300 Subject: [PATCH 26/26] Update prompb/README.md Co-authored-by: Julien Pivotto Signed-off-by: Paschalis Tsilias --- prompb/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/prompb/README.md b/prompb/README.md index c1acb508aa..a33d7bfb88 100644 --- a/prompb/README.md +++ b/prompb/README.md @@ -4,6 +4,6 @@ re-compile them when building Prometheus. If however you have modified the defs and do need to re-compile, run `make proto` from the parent dir. -In order for the [script][../scripts/genproto.sh] to run, you'll need `protoc` (version 3.15.8) in +In order for the [script](../scripts/genproto.sh) to run, you'll need `protoc` (version 3.15.8) in your PATH.