From 1a6c2283a3e37f5ef6e0df78b72e67d3b2995a56 Mon Sep 17 00:00:00 2001
From: Mateusz Gozdek
Date: Fri, 22 Oct 2021 10:06:44 +0200
Subject: [PATCH] Format Go source files using 'gofumpt -w -s -extra'

Part of #9557

Signed-off-by: Mateusz Gozdek
---
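Note for reviewers (this text sits in the patch comment area and is ignored by
'git am'): the hunks below are mechanical. As a minimal sketch of the kinds of
rewrites visible in this diff — using a hypothetical file demo.go, not one of
the files changed here — gofumpt converts old-style octal literals, groups
adjacent declarations, and (with -extra) merges repeated parameter types:

	// demo.go before formatting:
	package demo

	import "os"

	const filePerm = 0666

	var promPath = os.Args[0]
	var promData = os.TempDir()

	func query(name string, start string, end string) string { return name + start + end }

	// demo.go after 'gofumpt -w -s -extra':
	package demo

	import "os"

	const filePerm = 0o666

	var (
		promPath = os.Args[0]
		promData = os.TempDir()
	)

	func query(name, start, end string) string { return name + start + end }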
 cmd/prometheus/main.go | 6 +-
 cmd/prometheus/main_test.go | 10 +-
 cmd/promtool/archive.go | 2 +-
 cmd/promtool/backfill.go | 3 -
 cmd/promtool/debug.go | 1 -
 cmd/promtool/main.go | 11 +-
 cmd/promtool/rules_test.go | 8 +-
 cmd/promtool/sd_test.go | 11 +-
 cmd/promtool/tsdb.go | 11 +-
 cmd/promtool/unittest.go | 1 -
 config/config.go | 2 +-
 config/config_test.go | 213 +++--
 discovery/aws/ec2.go | 12 +-
 discovery/aws/lightsail.go | 12 +-
 discovery/azure/azure.go | 4 +-
 discovery/consul/consul.go | 4 +-
 discovery/consul/consul_test.go | 7 +-
 discovery/digitalocean/mock_test.go | 3 +-
 discovery/file/file_test.go | 7 +-
 discovery/hetzner/hcloud.go | 1 +
 discovery/hetzner/mock_test.go | 6 +-
 discovery/hetzner/robot.go | 1 +
 discovery/http/http_test.go | 2 -
 discovery/kubernetes/client_metrics.go | 10 +-
 discovery/kubernetes/endpoints_test.go | 2 +-
 discovery/kubernetes/kubernetes_test.go | 16 +-
 discovery/kubernetes/node_test.go | 2 +-
 discovery/legacymanager/manager_test.go | 7 +-
 discovery/manager_test.go | 11 +-
 discovery/marathon/marathon.go | 2 -
 discovery/marathon/marathon_test.go | 76 +-
 discovery/openstack/hypervisor.go | 6 +-
 discovery/openstack/hypervisor_test.go | 1 -
 discovery/openstack/instance.go | 6 +-
 discovery/openstack/instance_test.go | 1 -
 discovery/openstack/mock_test.go | 2 +-
 discovery/openstack/openstack.go | 1 -
 discovery/scaleway/scaleway.go | 3 +-
 discovery/targetgroup/targetgroup_test.go | 38 +-
 discovery/triton/triton.go | 2 +-
 discovery/triton/triton_test.go | 20 +-
 discovery/uyuni/uyuni.go | 7 +-
 discovery/xds/client_test.go | 24 +-
 discovery/xds/kuma_test.go | 1 -
 .../examples/custom-sd/adapter-usage/main.go | 5 +-
 .../examples/custom-sd/adapter/adapter.go | 2 +-
 .../remote_storage_adapter/graphite/client.go | 2 +-
 .../graphite/client_test.go | 12 +-
 .../remote_storage_adapter/influxdb/client.go | 2 +-
 .../opentsdb/client_test.go | 12 +-
 notifier/notifier.go | 1 -
 notifier/notifier_test.go | 7 +-
 pkg/labels/labels_test.go | 2 +-
 pkg/logging/file.go | 10 +-
 pkg/runtime/statfs_default.go | 1 -
 pkg/runtime/statfs_linux_386.go | 1 -
 pkg/runtime/statfs_uint32.go | 1 -
 pkg/textparse/promparse_test.go | 5 +-
 promql/engine.go | 12 +-
 promql/engine_test.go | 787 ++++++++++--------
 promql/functions.go | 2 +-
 promql/parser/ast.go | 13 +-
 promql/parser/generated_parser.y.go | 162 ++--
 promql/parser/lex_test.go | 34 +-
 promql/parser/parse.go | 10 +-
 promql/parser/parse_test.go | 771 +++++++++++------
 promql/query_logger.go | 6 +-
 rules/alerting.go | 4 +-
 rules/manager.go | 2 -
 rules/manager_test.go | 11 +-
 scrape/scrape_test.go | 17 +-
 storage/buffer.go | 2 +-
 storage/merge.go | 8 +-
 storage/merge_test.go | 3 +-
 storage/remote/queue_manager.go | 8 +-
 storage/remote/queue_manager_test.go | 4 +-
 storage/remote/read_test.go | 1 -
 storage/remote/write_handler.go | 4 +-
 template/template.go | 4 +-
 template/template_test.go | 21 +-
 tsdb/agent/db.go | 4 +-
 tsdb/agent/series.go | 2 +-
 tsdb/block.go | 12 +-
 tsdb/block_test.go | 4 +-
 tsdb/chunks/chunks.go | 12 +-
 tsdb/chunks/head_chunks.go | 11 +-
 tsdb/chunks/head_chunks_other.go | 8 +-
 tsdb/chunks/head_chunks_test.go | 3 +-
 tsdb/chunks/head_chunks_windows.go | 8 +-
 tsdb/compact.go | 2 +-
 tsdb/db.go | 10 +-
 tsdb/db_test.go | 15 +-
 tsdb/encoding/encoding.go | 1 -
 tsdb/exemplar.go | 2 +-
 tsdb/exemplar_test.go | 4 +-
 tsdb/fileutil/fileutil.go | 6 +-
 tsdb/fileutil/flock.go | 2 +-
 tsdb/fileutil/flock_plan9.go | 2 +-
 tsdb/fileutil/flock_solaris.go | 2 +-
 tsdb/fileutil/flock_unix.go | 2 +-
 tsdb/head.go | 3 +-
 tsdb/head_test.go | 11 +-
 tsdb/head_wal.go | 13 +-
 tsdb/index/index.go | 7 +-
 tsdb/index/index_test.go | 5 +-
 tsdb/index/postings_test.go | 17 +-
 tsdb/index/postingsstats.go | 1 -
 tsdb/index/postingsstats_test.go | 3 +-
 tsdb/querier.go | 4 +-
 tsdb/querier_test.go | 10 +-
 tsdb/record/record.go | 12 +-
 tsdb/repair_test.go | 2 +-
 tsdb/test/hash_test.go | 1 -
 tsdb/tombstones/tombstones_test.go | 2 +-
 tsdb/tsdbutil/buffer.go | 2 +-
 tsdb/tsdbutil/chunks.go | 2 +-
 tsdb/wal.go | 4 +-
 tsdb/wal/checkpoint.go | 2 +-
 tsdb/wal/checkpoint_test.go | 31 +-
 tsdb/wal/reader_test.go | 177 ++--
 tsdb/wal/wal.go | 6 +-
 tsdb/wal/wal_test.go | 8 +-
 tsdb/wal/watcher_test.go | 22 +-
 tsdb/wal_test.go | 8 +-
 util/osutil/hostname.go | 2 -
 util/strutil/strconv.go | 4 +-
 util/testutil/roundtrip.go | 4 +-
 web/api/v1/api.go | 9 +-
 web/api/v1/api_test.go | 37 +-
 web/web.go | 1 -
 130 files changed, 1697 insertions(+), 1350 deletions(-)

diff --git a/cmd/prometheus/main.go b/cmd/prometheus/main.go
index c18f6aa55..62796f9b6 100644
--- a/cmd/prometheus/main.go
+++ b/cmd/prometheus/main.go
@@ -664,7 +664,7 @@ func main() {
 	)
 
 	// This is passed to ruleManager.Update().
-	var externalURL = cfg.web.ExternalURL.String()
+	externalURL := cfg.web.ExternalURL.String()
 
 	reloaders := []reloader{
 		{
@@ -896,7 +896,6 @@ func main() {
 						return nil
 					}
 				}
-
 			},
 			func(err error) {
 				// Wait for any in-progress reloads to complete to avoid
@@ -1146,6 +1145,7 @@ type safePromQLNoStepSubqueryInterval struct {
 func durationToInt64Millis(d time.Duration) int64 {
 	return int64(d / time.Millisecond)
 }
+
 func (i *safePromQLNoStepSubqueryInterval) Set(ev model.Duration) {
 	i.value.Store(durationToInt64Millis(time.Duration(ev)))
 }
@@ -1159,7 +1159,7 @@ type reloader struct {
 	reloader func(*config.Config) error
 }
 
-func reloadConfig(filename string, expandExternalLabels bool, enableExemplarStorage bool, logger log.Logger, noStepSuqueryInterval *safePromQLNoStepSubqueryInterval, rls ...reloader) (err error) {
+func reloadConfig(filename string, expandExternalLabels, enableExemplarStorage bool, logger log.Logger, noStepSuqueryInterval *safePromQLNoStepSubqueryInterval, rls ...reloader) (err error) {
 	start := time.Now()
 	timings := []interface{}{}
 	level.Info(logger).Log("msg", "Loading configuration file", "filename", filename)
diff --git a/cmd/prometheus/main_test.go b/cmd/prometheus/main_test.go
index 86ecce1ac..e6c8fcc07 100644
--- a/cmd/prometheus/main_test.go
+++ b/cmd/prometheus/main_test.go
@@ -35,10 +35,12 @@ import (
 	"github.com/prometheus/prometheus/rules"
 )
 
-var promPath = os.Args[0]
-var promConfig = filepath.Join("..", "..", "documentation", "examples", "prometheus.yml")
-var agentConfig = filepath.Join("..", "..", "documentation", "examples", "prometheus-agent.yml")
-var promData = filepath.Join(os.TempDir(), "data")
+var (
+	promPath    = os.Args[0]
+	promConfig  = filepath.Join("..", "..", "documentation", "examples", "prometheus.yml")
+	agentConfig = filepath.Join("..", "..", "documentation", "examples", "prometheus-agent.yml")
+	promData    = filepath.Join(os.TempDir(), "data")
+)
 
 func TestMain(m *testing.M) {
 	for i, arg := range os.Args {
diff --git a/cmd/promtool/archive.go b/cmd/promtool/archive.go
index 520c26b63..cca148cb4 100644
--- a/cmd/promtool/archive.go
+++ b/cmd/promtool/archive.go
@@ -21,7 +21,7 @@ import (
 	"github.com/pkg/errors"
 )
 
-const filePerm = 0666
+const filePerm = 0o666
 
 type tarGzFileWriter struct {
 	tarWriter *tar.Writer
diff --git a/cmd/promtool/backfill.go b/cmd/promtool/backfill.go
index db6eddcd6..b626743de 100644
--- a/cmd/promtool/backfill.go
+++ b/cmd/promtool/backfill.go
@@ -105,7 +105,6 @@ func createBlocks(input []byte, mint, maxt, maxBlockDuration int64, maxSamplesIn
 				// The next sample is not in this timerange, we can avoid parsing
 				// the file for this timerange.
 				continue
-
 			}
 			nextSampleTs = math.MaxInt64
 
@@ -207,13 +206,11 @@ func createBlocks(input []byte, mint, maxt, maxBlockDuration int64, maxSamplesIn
 
 			return nil
 		}()
-
 		if err != nil {
 			return errors.Wrap(err, "process blocks")
 		}
 	}
 	return nil
-
 }
 
 func backfill(maxSamplesInAppender int, input []byte, outputDir string, humanReadable, quiet bool, maxBlockDuration time.Duration) (err error) {
diff --git a/cmd/promtool/debug.go b/cmd/promtool/debug.go
index 280a0d44b..23d613bb0 100644
--- a/cmd/promtool/debug.go
+++ b/cmd/promtool/debug.go
@@ -57,7 +57,6 @@ func debugWrite(cfg debugWriterConfig) error {
 			return errors.Wrap(err, "error writing into the archive")
 		}
 	}
-
 	}
 
 	if err := archiver.close(); err != nil {
diff --git a/cmd/promtool/main.go b/cmd/promtool/main.go
index d49892d2d..5ba483566 100644
--- a/cmd/promtool/main.go
+++ b/cmd/promtool/main.go
@@ -257,7 +257,7 @@ func main() {
 	case tsdbDumpCmd.FullCommand():
 		os.Exit(checkErr(dumpSamples(*dumpPath, *dumpMinTime, *dumpMaxTime)))
-	//TODO(aSquare14): Work on adding support for custom block size.
+	// TODO(aSquare14): Work on adding support for custom block size.
 	case openMetricsImportCmd.FullCommand():
 		os.Exit(backfillOpenMetrics(*importFilePath, *importDBPath, *importHumanReadable, *importQuiet, *maxBlockDuration))
 
@@ -560,7 +560,6 @@ func checkDuplicates(groups []rulefmt.RuleGroup) []compareRuleType {
 	var rules compareRuleTypes
 
 	for _, group := range groups {
-
 		for _, rule := range group.Rules {
 			rules = append(rules, compareRuleType{
 				metric: ruleMetric(rule),
@@ -774,7 +773,7 @@ func QuerySeries(url *url.URL, matchers []string, start, end string, p printer)
 }
 
 // QueryLabels queries for label values against a Prometheus server.
-func QueryLabels(url *url.URL, name string, start, end string, p printer) int {
+func QueryLabels(url *url.URL, name, start, end string, p printer) int {
 	if url.Scheme == "" {
 		url.Scheme = "http"
 	}
@@ -952,11 +951,13 @@ type promqlPrinter struct{}
 func (p *promqlPrinter) printValue(v model.Value) {
 	fmt.Println(v)
 }
+
 func (p *promqlPrinter) printSeries(val []model.LabelSet) {
 	for _, v := range val {
 		fmt.Println(v)
 	}
 }
+
 func (p *promqlPrinter) printLabelValues(val model.LabelValues) {
 	for _, v := range val {
 		fmt.Println(v)
@@ -969,10 +970,12 @@ func (j *jsonPrinter) printValue(v model.Value) {
 	//nolint:errcheck
 	json.NewEncoder(os.Stdout).Encode(v)
 }
+
 func (j *jsonPrinter) printSeries(v []model.LabelSet) {
 	//nolint:errcheck
 	json.NewEncoder(os.Stdout).Encode(v)
 }
+
 func (j *jsonPrinter) printLabelValues(v model.LabelValues) {
 	//nolint:errcheck
 	json.NewEncoder(os.Stdout).Encode(v)
@@ -980,7 +983,7 @@ func (j *jsonPrinter) printLabelValues(v model.LabelValues) {
 
 // importRules backfills recording rules from the files provided. The output are blocks of data
 // at the outputDir location.
-func importRules(url *url.URL, start, end, outputDir string, evalInterval time.Duration, maxBlockDuration time.Duration, files ...string) error {
+func importRules(url *url.URL, start, end, outputDir string, evalInterval, maxBlockDuration time.Duration, files ...string) error {
 	ctx := context.Background()
 	var stime, etime time.Time
 	var err error
diff --git a/cmd/promtool/rules_test.go b/cmd/promtool/rules_test.go
index d1f8938b9..868dff984 100644
--- a/cmd/promtool/rules_test.go
+++ b/cmd/promtool/rules_test.go
@@ -54,7 +54,7 @@ func TestBackfillRuleIntegration(t *testing.T) {
 		twentyFourHourDuration, _ = time.ParseDuration("24h")
 	)
 
-	var testCases = []struct {
+	testCases := []struct {
 		name             string
 		runcount         int
 		maxBlockDuration time.Duration
@@ -192,7 +192,7 @@ func createSingleRuleTestFiles(path string) error {
     labels:
         testlabel11: testlabelvalue11
 `
-	return ioutil.WriteFile(path, []byte(recordingRules), 0777)
+	return ioutil.WriteFile(path, []byte(recordingRules), 0o777)
 }
 
 func createMultiRuleTestFiles(path string) error {
@@ -212,7 +212,7 @@ func createMultiRuleTestFiles(path string) error {
     labels:
         testlabel11: testlabelvalue13
 `
-	return ioutil.WriteFile(path, []byte(recordingRules), 0777)
+	return ioutil.WriteFile(path, []byte(recordingRules), 0o777)
 }
 
 // TestBackfillLabels confirms that the labels in the rule file override the labels from the metrics
@@ -244,7 +244,7 @@ func TestBackfillLabels(t *testing.T) {
     labels:
         name1: value-from-rule
 `
-	require.NoError(t, ioutil.WriteFile(path, []byte(recordingRules), 0777))
+	require.NoError(t, ioutil.WriteFile(path, []byte(recordingRules), 0o777))
 	errs := ruleImporter.loadGroups(ctx, []string{path})
 	for _, err := range errs {
 		require.NoError(t, err)
diff --git a/cmd/promtool/sd_test.go b/cmd/promtool/sd_test.go
index e985197b6..d9d974481 100644
--- a/cmd/promtool/sd_test.go
+++ b/cmd/promtool/sd_test.go
@@ -46,21 +46,24 @@ func TestSDCheckResult(t *testing.T) {
 	}
 
 	expectedSDCheckResult := []sdCheckResult{
-		sdCheckResult{
+		{
 			DiscoveredLabels: labels.Labels{
 				labels.Label{Name: "__address__", Value: "localhost:8080"},
 				labels.Label{Name: "__scrape_interval__", Value: "0s"},
 				labels.Label{Name: "__scrape_timeout__", Value: "0s"},
-				labels.Label{Name: "foo", Value: "bar"}},
+				labels.Label{Name: "foo", Value: "bar"},
+			},
 			Labels: labels.Labels{
 				labels.Label{Name: "__address__", Value: "localhost:8080"},
 				labels.Label{Name: "__scrape_interval__", Value: "0s"},
 				labels.Label{Name: "__scrape_timeout__", Value: "0s"},
 				labels.Label{Name: "foo", Value: "bar"},
 				labels.Label{Name: "instance", Value: "localhost:8080"},
-				labels.Label{Name: "newfoo", Value: "bar"}},
+				labels.Label{Name: "newfoo", Value: "bar"},
+			},
 			Error: nil,
-		}}
+		},
+	}
 
 	require.Equal(t, expectedSDCheckResult, getSDCheckResult(targetGroups, scrapeConfig))
 }
diff --git a/cmd/promtool/tsdb.go b/cmd/promtool/tsdb.go
index 45cde87cf..5e77802a9 100644
--- a/cmd/promtool/tsdb.go
+++ b/cmd/promtool/tsdb.go
@@ -17,7 +17,6 @@ import (
 	"bufio"
 	"context"
 	"fmt"
-	"github.com/prometheus/prometheus/tsdb/index"
 	"io"
 	"io/ioutil"
 	"math"
@@ -32,6 +31,8 @@ import (
 	"text/tabwriter"
 	"time"
 
+	"github.com/prometheus/prometheus/tsdb/index"
+
 	"github.com/alecthomas/units"
 	"github.com/go-kit/log"
 	"github.com/pkg/errors"
@@ -78,7 +79,7 @@ func benchmarkWrite(outPath, samplesFile string, numMetrics, numScrapes int) err
 	if err := os.RemoveAll(b.outPath); err != nil {
 		return err
 	}
-	if err := os.MkdirAll(b.outPath, 0777); err != nil {
+	if err := os.MkdirAll(b.outPath, 0o777); err != nil {
 		return err
 	}
 
@@ -589,7 +590,7 @@ func analyzeCompaction(block tsdb.BlockReader, indexr tsdb.IndexReader) (err err
 	histogram := make([]int, nBuckets)
 	totalChunks := 0
 	for postingsr.Next() {
-		var lbsl = labels.Labels{}
+		lbsl := labels.Labels{}
 		var chks []chunks.Meta
 		if err := indexr.Series(postingsr.At(), &lbsl, &chks); err != nil {
 			return err
@@ -671,14 +672,14 @@ func checkErr(err error) int {
 	return 0
 }
 
-func backfillOpenMetrics(path string, outputDir string, humanReadable, quiet bool, maxBlockDuration time.Duration) int {
+func backfillOpenMetrics(path, outputDir string, humanReadable, quiet bool, maxBlockDuration time.Duration) int {
 	inputFile, err := fileutil.OpenMmapFile(path)
 	if err != nil {
 		return checkErr(err)
 	}
 	defer inputFile.Close()
 
-	if err := os.MkdirAll(outputDir, 0777); err != nil {
+	if err := os.MkdirAll(outputDir, 0o777); err != nil {
 		return checkErr(errors.Wrap(err, "create output dir"))
 	}
diff --git a/cmd/promtool/unittest.go b/cmd/promtool/unittest.go
index 5dcc5bb56..637846303 100644
--- a/cmd/promtool/unittest.go
+++ b/cmd/promtool/unittest.go
@@ -387,7 +387,6 @@ Outer:
 
 // seriesLoadingString returns the input series in PromQL notation.
 func (tg *testGroup) seriesLoadingString() string {
-
 	result := fmt.Sprintf("load %v\n", shortDuration(tg.Interval))
 	for _, is := range tg.InputSeries {
 		result += fmt.Sprintf("  %v %v\n", is.Series, is.Values)
diff --git a/config/config.go b/config/config.go
index 9729642ae..a247898a9 100644
--- a/config/config.go
+++ b/config/config.go
@@ -99,7 +99,7 @@ func Load(s string, expandExternalLabels bool, logger log.Logger) (*Config, erro
 }
 
 // LoadFile parses the given YAML file into a Config.
-func LoadFile(filename string, agentMode bool, expandExternalLabels bool, logger log.Logger) (*Config, error) {
+func LoadFile(filename string, agentMode, expandExternalLabels bool, logger log.Logger) (*Config, error) {
 	content, err := ioutil.ReadFile(filename)
 	if err != nil {
 		return nil, err
diff --git a/config/config_test.go b/config/config_test.go
index f26e4f3e6..4fe5731a2 100644
--- a/config/config_test.go
+++ b/config/config_test.go
@@ -784,17 +784,19 @@ var expectedConf = &Config{
 		Scheme:           DefaultScrapeConfig.Scheme,
 		HTTPClientConfig: config.DefaultHTTPClientConfig,
 
-		ServiceDiscoveryConfigs: discovery.Configs{&openstack.SDConfig{
-			Role:            "instance",
-			Region:          "RegionOne",
-			Port:            80,
-			Availability:    "public",
-			RefreshInterval: model.Duration(60 * time.Second),
-			TLSConfig: config.TLSConfig{
-				CAFile:   "testdata/valid_ca_file",
-				CertFile: "testdata/valid_cert_file",
-				KeyFile:  "testdata/valid_key_file",
-			}},
+		ServiceDiscoveryConfigs: discovery.Configs{
+			&openstack.SDConfig{
+				Role:            "instance",
+				Region:          "RegionOne",
+				Port:            80,
+				Availability:    "public",
+				RefreshInterval: model.Duration(60 * time.Second),
+				TLSConfig: config.TLSConfig{
+					CAFile:   "testdata/valid_ca_file",
+					CertFile: "testdata/valid_cert_file",
+					KeyFile:  "testdata/valid_key_file",
+				},
+			},
 		},
 	},
 	{
@@ -808,22 +810,23 @@ var expectedConf = &Config{
 		Scheme:           DefaultScrapeConfig.Scheme,
 		HTTPClientConfig: config.DefaultHTTPClientConfig,
 
-		ServiceDiscoveryConfigs: discovery.Configs{&puppetdb.SDConfig{
-			URL:               "https://puppetserver/",
-			Query:             "resources { type = \"Package\" and title = \"httpd\" }",
-			IncludeParameters: true,
-			Port:              80,
-			RefreshInterval:   model.Duration(60 * time.Second),
-			HTTPClientConfig: config.HTTPClientConfig{
-				FollowRedirects: true,
-				TLSConfig: config.TLSConfig{
-					CAFile:   "testdata/valid_ca_file",
-					CertFile: "testdata/valid_cert_file",
-					KeyFile:  "testdata/valid_key_file",
+		ServiceDiscoveryConfigs: discovery.Configs{
+			&puppetdb.SDConfig{
+				URL:               "https://puppetserver/",
+				Query:             "resources { type = \"Package\" and title = \"httpd\" }",
+				IncludeParameters: true,
+				Port:              80,
+				RefreshInterval:   model.Duration(60 * time.Second),
+				HTTPClientConfig: config.HTTPClientConfig{
+					FollowRedirects: true,
+					TLSConfig: config.TLSConfig{
+						CAFile:   "testdata/valid_ca_file",
+						CertFile: "testdata/valid_cert_file",
+						KeyFile:  "testdata/valid_key_file",
+					},
+				},
 			},
 		},
-		},
 	},
 	{
 		JobName: "hetzner",
@@ -1086,170 +1089,224 @@ var expectedErrors = []struct {
 	{
 		filename: "jobname.bad.yml",
 		errMsg:   `job_name is empty`,
-	}, {
+	},
+	{
 		filename: "jobname_dup.bad.yml",
 		errMsg:   `found multiple scrape configs with job name "prometheus"`,
-	}, {
+	},
+	{
 		filename: "scrape_interval.bad.yml",
 		errMsg:   `scrape timeout greater than scrape interval`,
-	}, {
+	},
+	{
 		filename: "labelname.bad.yml",
 		errMsg:   `"not$allowed" is not a valid label name`,
-	}, {
+	},
+	{
 		filename: "labelname2.bad.yml",
 		errMsg:   `"not:allowed" is not a valid label name`,
-	}, {
+	},
+	{
 		filename: "labelvalue.bad.yml",
 		errMsg:   `"\xff" is not a valid label value`,
-	}, {
+	},
+	{
 		filename: "regex.bad.yml",
 		errMsg:   "error parsing regexp",
-	}, {
+	},
+	{
 		filename: "modulus_missing.bad.yml",
 		errMsg:   "relabel configuration for hashmod requires non-zero modulus",
-	}, {
+	},
+	{
 		filename: "labelkeep.bad.yml",
 		errMsg:   "labelkeep action requires only 'regex', and no other fields",
-	}, {
+	},
+	{
 		filename: "labelkeep2.bad.yml",
 		errMsg:   "labelkeep action requires only 'regex', and no other fields",
-	}, {
+	},
+	{
 		filename: "labelkeep3.bad.yml",
 		errMsg:   "labelkeep action requires only 'regex', and no other fields",
-	}, {
+	},
+	{
 		filename: "labelkeep4.bad.yml",
 		errMsg:   "labelkeep action requires only 'regex', and no other fields",
-	}, {
+	},
+	{
 		filename: "labelkeep5.bad.yml",
 		errMsg:   "labelkeep action requires only 'regex', and no other fields",
-	}, {
+	},
+	{
 		filename: "labeldrop.bad.yml",
 		errMsg:   "labeldrop action requires only 'regex', and no other fields",
-	}, {
+	},
+	{
 		filename: "labeldrop2.bad.yml",
 		errMsg:   "labeldrop action requires only 'regex', and no other fields",
-	}, {
+	},
+	{
 		filename: "labeldrop3.bad.yml",
 		errMsg:   "labeldrop action requires only 'regex', and no other fields",
-	}, {
+	},
+	{
 		filename: "labeldrop4.bad.yml",
 		errMsg:   "labeldrop action requires only 'regex', and no other fields",
-	}, {
+	},
+	{
 		filename: "labeldrop5.bad.yml",
 		errMsg:   "labeldrop action requires only 'regex', and no other fields",
-	}, {
+	},
+	{
 		filename: "labelmap.bad.yml",
 		errMsg:   "\"l-$1\" is invalid 'replacement' for labelmap action",
-	}, {
+	},
+	{
 		filename: "rules.bad.yml",
 		errMsg:   "invalid rule file path",
-	}, {
+	},
+	{
 		filename: "unknown_attr.bad.yml",
 		errMsg:   "field consult_sd_configs not found in type",
-	}, {
+	},
+	{
 		filename: "bearertoken.bad.yml",
 		errMsg:   "at most one of bearer_token & bearer_token_file must be configured",
-	}, {
+	},
+	{
 		filename: "bearertoken_basicauth.bad.yml",
 		errMsg:   "at most one of basic_auth, oauth2, bearer_token & bearer_token_file must be configured",
-	}, {
+	},
+	{
 		filename: "kubernetes_http_config_without_api_server.bad.yml",
 		errMsg:   "to use custom HTTP client configuration please provide the 'api_server' URL explicitly",
-	}, {
+	},
+	{
 		filename: "kubernetes_kubeconfig_with_apiserver.bad.yml",
 		errMsg:   "cannot use 'kubeconfig_file' and 'api_server' simultaneously",
-	}, {
+	},
+	{
 		filename: "kubernetes_kubeconfig_with_http_config.bad.yml",
 		errMsg:   "cannot use a custom HTTP client configuration together with 'kubeconfig_file'",
 	},
 	{
 		filename: "kubernetes_bearertoken.bad.yml",
 		errMsg:   "at most one of bearer_token & bearer_token_file must be configured",
-	}, {
+	},
+	{
 		filename: "kubernetes_role.bad.yml",
 		errMsg:   "role",
-	}, {
+	},
+	{
 		filename: "kubernetes_selectors_endpoints.bad.yml",
 		errMsg:   "endpoints role supports only pod, service, endpoints selectors",
-	}, {
+	},
+	{
 		filename: "kubernetes_selectors_ingress.bad.yml",
 		errMsg:   "ingress role supports only ingress selectors",
-	}, {
+	},
+	{
 		filename: "kubernetes_selectors_node.bad.yml",
 		errMsg:   "node role supports only node selectors",
-	}, {
+	},
+	{
 		filename: "kubernetes_selectors_pod.bad.yml",
 		errMsg:   "pod role supports only pod selectors",
-	}, {
+	},
+	{
 		filename: "kubernetes_selectors_service.bad.yml",
 		errMsg:   "service role supports only service selectors",
-	}, {
+	},
+	{
 		filename: "kubernetes_namespace_discovery.bad.yml",
 		errMsg:   "field foo not found in type kubernetes.plain",
-	}, {
+	},
+	{
 		filename: "kubernetes_selectors_duplicated_role.bad.yml",
 		errMsg:   "duplicated selector role: pod",
-	}, {
+	},
+	{
 		filename: "kubernetes_selectors_incorrect_selector.bad.yml",
 		errMsg:   "invalid selector: 'metadata.status-Running'; can't understand 'metadata.status-Running'",
-	}, {
+	},
+	{
 		filename: "kubernetes_bearertoken_basicauth.bad.yml",
 		errMsg:   "at most one of basic_auth, oauth2, bearer_token & bearer_token_file must be configured",
-	}, {
+	},
+	{
 		filename: "kubernetes_authorization_basicauth.bad.yml",
 		errMsg:   "at most one of basic_auth, oauth2 & authorization must be configured",
-	}, {
+	},
+	{
 		filename: "marathon_no_servers.bad.yml",
 		errMsg:   "marathon_sd: must contain at least one Marathon server",
-	}, {
+	},
+	{
 		filename: "marathon_authtoken_authtokenfile.bad.yml",
 		errMsg:   "marathon_sd: at most one of auth_token & auth_token_file must be configured",
-	}, {
+	},
+	{
 		filename: "marathon_authtoken_basicauth.bad.yml",
 		errMsg:   "marathon_sd: at most one of basic_auth, auth_token & auth_token_file must be configured",
-	}, {
+	},
+	{
 		filename: "marathon_authtoken_bearertoken.bad.yml",
 		errMsg:   "marathon_sd: at most one of bearer_token, bearer_token_file, auth_token & auth_token_file must be configured",
-	}, {
+	},
+	{
 		filename: "marathon_authtoken_authorization.bad.yml",
 		errMsg:   "marathon_sd: at most one of auth_token, auth_token_file & authorization must be configured",
-	}, {
+	},
+	{
 		filename: "openstack_role.bad.yml",
 		errMsg:   "unknown OpenStack SD role",
-	}, {
+	},
+	{
 		filename: "openstack_availability.bad.yml",
 		errMsg:   "unknown availability invalid, must be one of admin, internal or public",
-	}, {
+	},
+	{
 		filename: "url_in_targetgroup.bad.yml",
 		errMsg:   "\"http://bad\" is not a valid hostname",
-	}, {
+	},
+	{
 		filename: "target_label_missing.bad.yml",
 		errMsg:   "relabel configuration for replace action requires 'target_label' value",
-	}, {
+	},
+	{
 		filename: "target_label_hashmod_missing.bad.yml",
 		errMsg:   "relabel configuration for hashmod action requires 'target_label' value",
-	}, {
+	},
+	{
 		filename: "unknown_global_attr.bad.yml",
 		errMsg:   "field nonexistent_field not found in type config.plain",
-	}, {
+	},
+	{
 		filename: "remote_read_url_missing.bad.yml",
 		errMsg:   `url for remote_read is empty`,
-	}, {
+	},
+	{
 		filename: "remote_write_header.bad.yml",
 		errMsg:   `x-prometheus-remote-write-version is a reserved header. It must not be changed`,
-	}, {
+	},
+	{
 		filename: "remote_read_header.bad.yml",
 		errMsg:   `x-prometheus-remote-write-version is a reserved header. It must not be changed`,
-	}, {
+	},
+	{
 		filename: "remote_write_authorization_header.bad.yml",
 		errMsg:   `authorization header must be changed via the basic_auth, authorization, oauth2, or sigv4 parameter`,
-	}, {
+	},
+	{
 		filename: "remote_write_url_missing.bad.yml",
 		errMsg:   `url for remote_write is empty`,
-	}, {
+	},
+	{
 		filename: "remote_write_dup.bad.yml",
 		errMsg:   `found multiple remote write configs with job name "queue1"`,
-	}, {
+	},
+	{
 		filename: "remote_read_dup.bad.yml",
 		errMsg:   `found multiple remote read configs with job name "queue1"`,
 	},
diff --git a/discovery/aws/ec2.go b/discovery/aws/ec2.go
index 0bcfd0547..8984035e2 100644
--- a/discovery/aws/ec2.go
+++ b/discovery/aws/ec2.go
@@ -63,13 +63,11 @@ const (
 	ec2LabelSeparator = ","
 )
 
-var (
-	// DefaultEC2SDConfig is the default EC2 SD configuration.
-	DefaultEC2SDConfig = EC2SDConfig{
-		Port:            80,
-		RefreshInterval: model.Duration(60 * time.Second),
-	}
-)
+// DefaultEC2SDConfig is the default EC2 SD configuration.
+var DefaultEC2SDConfig = EC2SDConfig{
+	Port:            80,
+	RefreshInterval: model.Duration(60 * time.Second),
+}
 
 func init() {
 	discovery.RegisterConfig(&EC2SDConfig{})
diff --git a/discovery/aws/lightsail.go b/discovery/aws/lightsail.go
index e3dc65b5d..e5165776f 100644
--- a/discovery/aws/lightsail.go
+++ b/discovery/aws/lightsail.go
@@ -53,13 +53,11 @@ const (
 	lightsailLabelSeparator = ","
 )
 
-var (
-	// DefaultLightsailSDConfig is the default Lightsail SD configuration.
-	DefaultLightsailSDConfig = LightsailSDConfig{
-		Port:            80,
-		RefreshInterval: model.Duration(60 * time.Second),
-	}
-)
+// DefaultLightsailSDConfig is the default Lightsail SD configuration.
+var DefaultLightsailSDConfig = LightsailSDConfig{
+	Port:            80,
+	RefreshInterval: model.Duration(60 * time.Second),
+}
 
 func init() {
 	discovery.RegisterConfig(&LightsailSDConfig{})
diff --git a/discovery/azure/azure.go b/discovery/azure/azure.go
index c81eb2205..3e7cd4e12 100644
--- a/discovery/azure/azure.go
+++ b/discovery/azure/azure.go
@@ -339,7 +339,6 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
 		// Get the IP address information via separate call to the network provider.
 		for _, nicID := range vm.NetworkInterfaces {
 			networkInterface, err := client.getNetworkInterfaceByID(ctx, nicID)
-
 			if err != nil {
 				level.Error(d.logger).Log("msg", "Unable to get network interface", "name", nicID, "err", err)
 				ch <- target{labelSet: nil, err: err}
@@ -437,9 +436,8 @@ func (client *azureClient) getScaleSets(ctx context.Context) ([]compute.VirtualM
 func (client *azureClient) getScaleSetVMs(ctx context.Context, scaleSet compute.VirtualMachineScaleSet) ([]virtualMachine, error) {
 	var vms []virtualMachine
-	//TODO do we really need to fetch the resourcegroup this way?
+	// TODO do we really need to fetch the resourcegroup this way?
 	r, err := newAzureResourceFromID(*scaleSet.ID, nil)
-
 	if err != nil {
 		return nil, errors.Wrap(err, "could not parse scale set ID")
 	}
diff --git a/discovery/consul/consul.go b/discovery/consul/consul.go
index 16d0d2628..0aef808d5 100644
--- a/discovery/consul/consul.go
+++ b/discovery/consul/consul.go
@@ -54,7 +54,7 @@ const (
 	healthLabel = model.MetaLabelPrefix + "consul_health"
 	// serviceAddressLabel is the name of the label containing the (optional) service address.
 	serviceAddressLabel = model.MetaLabelPrefix + "consul_service_address"
-	//servicePortLabel is the name of the label containing the service port.
+	// servicePortLabel is the name of the label containing the service port.
 	servicePortLabel = model.MetaLabelPrefix + "consul_service_port"
 	// datacenterLabel is the name of the label containing the datacenter ID.
 	datacenterLabel = model.MetaLabelPrefix + "consul_dc"
@@ -530,7 +530,7 @@ func (srv *consulService) watch(ctx context.Context, ch chan<- []*targetgroup.Gr
 	for _, serviceNode := range serviceNodes {
 		// We surround the separated list with the separator as well. This way regular expressions
 		// in relabeling rules don't have to consider tag positions.
-		var tags = srv.tagSeparator + strings.Join(serviceNode.Service.Tags, srv.tagSeparator) + srv.tagSeparator
+		tags := srv.tagSeparator + strings.Join(serviceNode.Service.Tags, srv.tagSeparator) + srv.tagSeparator
 
 		// If the service address is not empty it should be used instead of the node address
 		// since the service may be registered remotely through a different node.
diff --git a/discovery/consul/consul_test.go b/discovery/consul/consul_test.go
index 15092c304..986c6a4d2 100644
--- a/discovery/consul/consul_test.go
+++ b/discovery/consul/consul_test.go
@@ -37,9 +37,9 @@ func TestMain(m *testing.M) {
 
 func TestConfiguredService(t *testing.T) {
 	conf := &SDConfig{
-		Services: []string{"configuredServiceName"}}
+		Services: []string{"configuredServiceName"},
+	}
 	consulDiscovery, err := NewDiscovery(conf, nil)
-
 	if err != nil {
 		t.Errorf("Unexpected error when initializing discovery %v", err)
 	}
@@ -57,7 +57,6 @@ func TestConfiguredServiceWithTag(t *testing.T) {
 		ServiceTags: []string{"http"},
 	}
 	consulDiscovery, err := NewDiscovery(conf, nil)
-
 	if err != nil {
 		t.Errorf("Unexpected error when initializing discovery %v", err)
 	}
@@ -153,7 +152,6 @@ func TestConfiguredServiceWithTags(t *testing.T) {
 
 	for _, tc := range cases {
 		consulDiscovery, err := NewDiscovery(tc.conf, nil)
-
 		if err != nil {
 			t.Errorf("Unexpected error when initializing discovery %v", err)
 		}
@@ -168,7 +166,6 @@ func TestNonConfiguredService(t *testing.T) {
 	conf := &SDConfig{}
 	consulDiscovery, err := NewDiscovery(conf, nil)
-
 	if err != nil {
 		t.Errorf("Unexpected error when initializing discovery %v", err)
 	}
diff --git a/discovery/digitalocean/mock_test.go b/discovery/digitalocean/mock_test.go
index edbdf92ba..2f19b5e1a 100644
--- a/discovery/digitalocean/mock_test.go
+++ b/discovery/digitalocean/mock_test.go
@@ -75,7 +75,8 @@ func (m *SDMock) HandleDropletsList() {
 			panic(err)
 		}
 	}
-	fmt.Fprint(w, []string{`
+	fmt.Fprint(w, []string{
+		`
 {
 	"droplets": [
 	{
diff --git a/discovery/file/file_test.go b/discovery/file/file_test.go
index bf50fc257..4a9f8c26f 100644
--- a/discovery/file/file_test.go
+++ b/discovery/file/file_test.go
@@ -73,7 +73,7 @@ func (t *testRunner) copyFile(src string) string {
 }
 
 // copyFileTo atomically copies a file with a different name to the runner's directory.
-func (t *testRunner) copyFileTo(src string, name string) string {
+func (t *testRunner) copyFileTo(src, name string) string {
 	t.Helper()
 
 	newf, err := ioutil.TempFile(t.dir, "")
@@ -95,7 +95,7 @@ func (t *testRunner) copyFileTo(src string, name string) string {
 }
 
 // writeString writes atomically a string to a file.
-func (t *testRunner) writeString(file string, data string) {
+func (t *testRunner) writeString(file, data string) {
 	t.Helper()
 
 	newf, err := ioutil.TempFile(t.dir, "")
@@ -477,6 +477,7 @@ func TestRemoveFile(t *testing.T) {
 			},
 			{
 				Source: fileSource(sdFile, 1),
-			}},
+			},
+		},
 	)
 }
diff --git a/discovery/hetzner/hcloud.go b/discovery/hetzner/hcloud.go
index 35492e6a0..aa406a1a7 100644
--- a/discovery/hetzner/hcloud.go
+++ b/discovery/hetzner/hcloud.go
@@ -78,6 +78,7 @@ func newHcloudDiscovery(conf *SDConfig, logger log.Logger) (*hcloudDiscovery, er
 	)
 	return d, nil
 }
+
 func (d *hcloudDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
 	servers, err := d.client.Server.All(ctx)
 	if err != nil {
diff --git a/discovery/hetzner/mock_test.go b/discovery/hetzner/mock_test.go
index 5936d5257..ecf313274 100644
--- a/discovery/hetzner/mock_test.go
+++ b/discovery/hetzner/mock_test.go
@@ -489,8 +489,10 @@ func (m *SDMock) HandleHcloudNetworks() {
 	})
 }
 
-const robotTestUsername = "my-hetzner"
-const robotTestPassword = "my-password"
+const (
+	robotTestUsername = "my-hetzner"
+	robotTestPassword = "my-password"
+)
 
 // HandleRobotServers mocks the robot servers list endpoint.
 func (m *SDMock) HandleRobotServers() {
diff --git a/discovery/hetzner/robot.go b/discovery/hetzner/robot.go
index 25f04502f..d0f3e4d94 100644
--- a/discovery/hetzner/robot.go
+++ b/discovery/hetzner/robot.go
@@ -70,6 +70,7 @@ func newRobotDiscovery(conf *SDConfig, logger log.Logger) (*robotDiscovery, erro
 
 	return d, nil
 }
+
 func (d *robotDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
 	req, err := http.NewRequest("GET", d.endpoint+"/server", nil)
 	if err != nil {
diff --git a/discovery/http/http_test.go b/discovery/http/http_test.go
index e9dc98b8f..b7a0369e7 100644
--- a/discovery/http/http_test.go
+++ b/discovery/http/http_test.go
@@ -60,7 +60,6 @@ func TestHTTPValidRefresh(t *testing.T) {
 		},
 	}
 	require.Equal(t, tgs, expectedTargets)
-
 }
 
 func TestHTTPInvalidCode(t *testing.T) {
@@ -398,5 +397,4 @@ func TestSourceDisappeared(t *testing.T) {
 			require.Equal(t, test.expectedTargets[i], tgs)
 		}
 	}
-
 }
diff --git a/discovery/kubernetes/client_metrics.go b/discovery/kubernetes/client_metrics.go
index 45e249be2..3a33e3e8d 100644
--- a/discovery/kubernetes/client_metrics.go
+++ b/discovery/kubernetes/client_metrics.go
@@ -121,9 +121,11 @@ func (f *clientGoRequestMetricAdapter) Register(registerer prometheus.Registerer
 		clientGoRequestLatencyMetricVec,
 	)
 }
-func (clientGoRequestMetricAdapter) Increment(ctx context.Context, code string, method string, host string) {
+
+func (clientGoRequestMetricAdapter) Increment(ctx context.Context, code, method, host string) {
 	clientGoRequestResultMetricVec.WithLabelValues(code).Inc()
 }
+
 func (clientGoRequestMetricAdapter) Observe(ctx context.Context, verb string, u url.URL, latency time.Duration) {
 	clientGoRequestLatencyMetricVec.WithLabelValues(u.EscapedPath()).Observe(latency.Seconds())
 }
@@ -146,21 +148,27 @@ func (f *clientGoWorkqueueMetricsProvider) Register(registerer prometheus.Regist
 func (f *clientGoWorkqueueMetricsProvider) NewDepthMetric(name string) workqueue.GaugeMetric {
 	return clientGoWorkqueueDepthMetricVec.WithLabelValues(name)
 }
+
 func (f *clientGoWorkqueueMetricsProvider) NewAddsMetric(name string) workqueue.CounterMetric {
 	return clientGoWorkqueueAddsMetricVec.WithLabelValues(name)
 }
+
 func (f *clientGoWorkqueueMetricsProvider) NewLatencyMetric(name string) workqueue.HistogramMetric {
 	return clientGoWorkqueueLatencyMetricVec.WithLabelValues(name)
 }
+
 func (f *clientGoWorkqueueMetricsProvider) NewWorkDurationMetric(name string) workqueue.HistogramMetric {
 	return clientGoWorkqueueWorkDurationMetricVec.WithLabelValues(name)
 }
+
 func (f *clientGoWorkqueueMetricsProvider) NewUnfinishedWorkSecondsMetric(name string) workqueue.SettableGaugeMetric {
 	return clientGoWorkqueueUnfinishedWorkSecondsMetricVec.WithLabelValues(name)
 }
+
 func (f *clientGoWorkqueueMetricsProvider) NewLongestRunningProcessorSecondsMetric(name string) workqueue.SettableGaugeMetric {
 	return clientGoWorkqueueLongestRunningProcessorMetricVec.WithLabelValues(name)
 }
+
 func (clientGoWorkqueueMetricsProvider) NewRetriesMetric(name string) workqueue.CounterMetric {
 	// Retries are not used so the metric is omitted.
 	return noopMetric{}
diff --git a/discovery/kubernetes/endpoints_test.go b/discovery/kubernetes/endpoints_test.go
index e73d8ba06..335242c3c 100644
--- a/discovery/kubernetes/endpoints_test.go
+++ b/discovery/kubernetes/endpoints_test.go
@@ -27,7 +27,7 @@ import (
 )
 
 func makeEndpoints() *v1.Endpoints {
-	var nodeName = "foobar"
+	nodeName := "foobar"
 	return &v1.Endpoints{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "testendpoints",
diff --git a/discovery/kubernetes/kubernetes_test.go b/discovery/kubernetes/kubernetes_test.go
index e1ca23402..2da8ec546 100644
--- a/discovery/kubernetes/kubernetes_test.go
+++ b/discovery/kubernetes/kubernetes_test.go
@@ -171,13 +171,15 @@ type hasSynced interface {
 	hasSynced() bool
 }
 
-var _ hasSynced = &Discovery{}
-var _ hasSynced = &Node{}
-var _ hasSynced = &Endpoints{}
-var _ hasSynced = &EndpointSlice{}
-var _ hasSynced = &Ingress{}
-var _ hasSynced = &Pod{}
-var _ hasSynced = &Service{}
+var (
+	_ hasSynced = &Discovery{}
+	_ hasSynced = &Node{}
+	_ hasSynced = &Endpoints{}
+	_ hasSynced = &EndpointSlice{}
+	_ hasSynced = &Ingress{}
+	_ hasSynced = &Pod{}
+	_ hasSynced = &Service{}
+)
 
 func (d *Discovery) hasSynced() bool {
 	d.RLock()
diff --git a/discovery/kubernetes/node_test.go b/discovery/kubernetes/node_test.go
index 4a08eefc8..afdaaf6b2 100644
--- a/discovery/kubernetes/node_test.go
+++ b/discovery/kubernetes/node_test.go
@@ -25,7 +25,7 @@ import (
 	"github.com/prometheus/prometheus/discovery/targetgroup"
 )
 
-func makeNode(name, address string, labels map[string]string, annotations map[string]string) *v1.Node {
+func makeNode(name, address string, labels, annotations map[string]string) *v1.Node {
 	return &v1.Node{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: name,
diff --git a/discovery/legacymanager/manager_test.go b/discovery/legacymanager/manager_test.go
index a977fcd29..ce2278d23 100644
--- a/discovery/legacymanager/manager_test.go
+++ b/discovery/legacymanager/manager_test.go
@@ -37,7 +37,6 @@ func TestMain(m *testing.M) {
 
 // TestTargetUpdatesOrder checks that the target updates are received in the expected order.
 func TestTargetUpdatesOrder(t *testing.T) {
-
 	// The order by which the updates are send is determined by the interval passed to the mock discovery adapter
 	// Final targets array is ordered alphabetically by the name of the discoverer.
 	// For example discoverer "A" with targets "t2,t3" and discoverer "B" with targets "t1,t2" will result in "t2,t3,t1,t2" after the merge.
@@ -117,7 +116,8 @@ func TestTargetUpdatesOrder(t *testing.T) {
 					{
 						Source:  "tp1_group2",
 						Targets: []model.LabelSet{{"__instance__": "2"}},
-					}},
+					},
+				},
 			},
 		},
 	},
@@ -729,14 +729,12 @@ func verifyPresence(t *testing.T, tSets map[poolKey]map[string]*targetgroup.Grou
 	match := false
 	var mergedTargets string
 	for _, targetGroup := range tSets[poolKey] {
-
 		for _, l := range targetGroup.Targets {
 			mergedTargets = mergedTargets + " " + l.String()
 			if l.String() == label {
 				match = true
 			}
 		}
-
 	}
 	if match != present {
 		msg := ""
@@ -926,7 +924,6 @@ func TestGaugeFailedConfigs(t *testing.T) {
 	if failedCount != 0 {
 		t.Fatalf("Expected to get no failed config, got: %v", failedCount)
 	}
-
 }
 
 func TestCoordinationWithReceiver(t *testing.T) {
diff --git a/discovery/manager_test.go b/discovery/manager_test.go
index 657130e5e..37d57f94c 100644
--- a/discovery/manager_test.go
+++ b/discovery/manager_test.go
@@ -37,7 +37,6 @@ func TestMain(m *testing.M) {
 
 // TestTargetUpdatesOrder checks that the target updates are received in the expected order.
 func TestTargetUpdatesOrder(t *testing.T) {
-
 	// The order by which the updates are send is determined by the interval passed to the mock discovery adapter
 	// Final targets array is ordered alphabetically by the name of the discoverer.
 	// For example discoverer "A" with targets "t2,t3" and discoverer "B" with targets "t1,t2" will result in "t2,t3,t1,t2" after the merge.
@@ -117,7 +116,8 @@ func TestTargetUpdatesOrder(t *testing.T) {
 					{
 						Source:  "tp1_group2",
 						Targets: []model.LabelSet{{"__instance__": "2"}},
-					}},
+					},
+				},
 			},
 		},
 	},
@@ -719,7 +719,7 @@ func staticConfig(addrs ...string) StaticConfig {
 	return cfg
 }
 
-func verifySyncedPresence(t *testing.T, tGroups map[string][]*targetgroup.Group, key string, label string, present bool) {
+func verifySyncedPresence(t *testing.T, tGroups map[string][]*targetgroup.Group, key, label string, present bool) {
 	t.Helper()
 	if _, ok := tGroups[key]; !ok {
 		t.Fatalf("'%s' should be present in Group map keys: %v", key, tGroups)
@@ -734,7 +734,6 @@ func verifySyncedPresence(t *testing.T, tGroups map[string][]*targetgroup.Group,
 			match = true
 		}
 	}
-
 	}
 	if match != present {
 		msg := ""
@@ -755,14 +754,12 @@ func verifyPresence(t *testing.T, tSets map[poolKey]map[string]*targetgroup.Grou
 	match := false
 	var mergedTargets string
 	for _, targetGroup := range tSets[poolKey] {
-
 		for _, l := range targetGroup.Targets {
 			mergedTargets = mergedTargets + " " + l.String()
 			if l.String() == label {
 				match = true
 			}
 		}
-
 	}
 	if match != present {
 		msg := ""
@@ -1062,7 +1059,6 @@ func TestTargetSetRecreatesEmptyStaticConfigs(t *testing.T) {
 	if lbls := syncedTargets["prometheus"][0].Labels; lbls != nil {
 		t.Fatalf("Unexpected Group: expected nil Labels, got %v", lbls)
 	}
-
 }
 
 func TestIdenticalConfigurationsAreCoalesced(t *testing.T) {
@@ -1179,7 +1175,6 @@ func TestGaugeFailedConfigs(t *testing.T) {
 	if failedCount != 0 {
 		t.Fatalf("Expected to get no failed config, got: %v", failedCount)
 	}
-
 }
 
 func TestCoordinationWithReceiver(t *testing.T) {
diff --git a/discovery/marathon/marathon.go b/discovery/marathon/marathon.go
index 30c32bb5d..5688efedd 100644
--- a/discovery/marathon/marathon.go
+++ b/discovery/marathon/marathon.go
@@ -478,7 +478,6 @@ func targetsForApp(app *app) []model.LabelSet {
 
 // Generate a target endpoint string in host:port format.
 func targetEndpoint(task *task, port uint32, containerNet bool) string {
-
 	var host string
 
 	// Use the task's ipAddress field when it's in a container network
@@ -493,7 +492,6 @@ func targetEndpoint(task *task, port uint32, containerNet bool) string {
 
 // Get a list of ports and a list of labels from a PortMapping.
 func extractPortMapping(portMappings []portMapping, containerNet bool) ([]uint32, []map[string]string) {
-
 	ports := make([]uint32, len(portMappings))
 	labels := make([]map[string]string, len(portMappings))
diff --git a/discovery/marathon/marathon_test.go b/discovery/marathon/marathon_test.go
index 8ac9713a2..00d066e27 100644
--- a/discovery/marathon/marathon_test.go
+++ b/discovery/marathon/marathon_test.go
@@ -60,9 +60,7 @@ func TestMarathonSDHandleError(t *testing.T) {
 }
 
 func TestMarathonSDEmptyList(t *testing.T) {
-	var (
-		client = func(_ context.Context, _ *http.Client, _ string) (*appList, error) { return &appList{}, nil }
-	)
+	client := func(_ context.Context, _ *http.Client, _ string) (*appList, error) { return &appList{}, nil }
 	tgs, err := testUpdateServices(client)
 	if err != nil {
 		t.Fatalf("Got error: %s", err)
@@ -99,11 +97,9 @@ func marathonTestAppList(labels map[string]string, runningTasks int) *appList {
 }
 
 func TestMarathonSDSendGroup(t *testing.T) {
-	var (
-		client = func(_ context.Context, _ *http.Client, _ string) (*appList, error) {
-			return marathonTestAppList(marathonValidLabel, 1), nil
-		}
-	)
+	client := func(_ context.Context, _ *http.Client, _ string) (*appList, error) {
+		return marathonTestAppList(marathonValidLabel, 1), nil
+	}
 	tgs, err := testUpdateServices(client)
 	if err != nil {
 		t.Fatalf("Got error: %s", err)
@@ -195,11 +191,9 @@ func marathonTestAppListWithMultiplePorts(labels map[string]string, runningTasks
 }
 
 func TestMarathonSDSendGroupWithMultiplePort(t *testing.T) {
-	var (
-		client = func(_ context.Context, _ *http.Client, _ string) (*appList, error) {
-			return marathonTestAppListWithMultiplePorts(marathonValidLabel, 1), nil
-		}
-	)
+	client := func(_ context.Context, _ *http.Client, _ string) (*appList, error) {
+		return marathonTestAppListWithMultiplePorts(marathonValidLabel, 1), nil
+	}
 	tgs, err := testUpdateServices(client)
 	if err != nil {
 		t.Fatalf("Got error: %s", err)
@@ -254,11 +248,9 @@ func marathonTestZeroTaskPortAppList(labels map[string]string, runningTasks int)
 }
 
 func TestMarathonZeroTaskPorts(t *testing.T) {
-	var (
-		client = func(_ context.Context, _ *http.Client, _ string) (*appList, error) {
-			return marathonTestZeroTaskPortAppList(marathonValidLabel, 1), nil
-		}
-	)
+	client := func(_ context.Context, _ *http.Client, _ string) (*appList, error) {
+		return marathonTestZeroTaskPortAppList(marathonValidLabel, 1), nil
+	}
 	tgs, err := testUpdateServices(client)
 	if err != nil {
 		t.Fatalf("Got error: %s", err)
@@ -331,11 +323,9 @@ func marathonTestAppListWithPortDefinitions(labels map[string]string, runningTas
 }
 
 func TestMarathonSDSendGroupWithPortDefinitions(t *testing.T) {
-	var (
-		client = func(_ context.Context, _ *http.Client, _ string) (*appList, error) {
-			return marathonTestAppListWithPortDefinitions(marathonValidLabel, 1), nil
-		}
-	)
+	client := func(_ context.Context, _ *http.Client, _ string) (*appList, error) {
+		return marathonTestAppListWithPortDefinitions(marathonValidLabel, 1), nil
+	}
 	tgs, err := testUpdateServices(client)
 	if err != nil {
 		t.Fatalf("Got error: %s", err)
@@ -403,11 +393,9 @@ func marathonTestAppListWithPortDefinitionsRequirePorts(labels map[string]string
 }
 
 func TestMarathonSDSendGroupWithPortDefinitionsRequirePorts(t *testing.T) {
-	var (
-		client = func(_ context.Context, _ *http.Client, _ string) (*appList, error) {
-			return marathonTestAppListWithPortDefinitionsRequirePorts(marathonValidLabel, 1), nil
-		}
-	)
+	client := func(_ context.Context, _ *http.Client, _ string) (*appList, error) {
+		return marathonTestAppListWithPortDefinitionsRequirePorts(marathonValidLabel, 1), nil
+	}
 	tgs, err := testUpdateServices(client)
 	if err != nil {
 		t.Fatalf("Got error: %s", err)
@@ -470,11 +458,9 @@ func marathonTestAppListWithPorts(labels map[string]string, runningTasks int) *a
 }
 
 func TestMarathonSDSendGroupWithPorts(t *testing.T) {
-	var (
-		client = func(_ context.Context, _ *http.Client, _ string) (*appList, error) {
-			return marathonTestAppListWithPorts(marathonValidLabel, 1), nil
-		}
-	)
+	client := func(_ context.Context, _ *http.Client, _ string) (*appList, error) {
+		return marathonTestAppListWithPorts(marathonValidLabel, 1), nil
+	}
 	tgs, err := testUpdateServices(client)
 	if err != nil {
 		t.Fatalf("Got error: %s", err)
@@ -546,11 +532,9 @@ func marathonTestAppListWithContainerPortMappings(labels map[string]string, runn
 }
 
 func TestMarathonSDSendGroupWithContainerPortMappings(t *testing.T) {
-	var (
-		client = func(_ context.Context, _ *http.Client, _ string) (*appList, error) {
-			return marathonTestAppListWithContainerPortMappings(marathonValidLabel, 1), nil
-		}
-	)
+	client := func(_ context.Context, _ *http.Client, _ string) (*appList, error) {
+		return marathonTestAppListWithContainerPortMappings(marathonValidLabel, 1), nil
+	}
 	tgs, err := testUpdateServices(client)
 	if err != nil {
 		t.Fatalf("Got error: %s", err)
@@ -622,11 +606,9 @@ func marathonTestAppListWithDockerContainerPortMappings(labels map[string]string
 }
 
 func TestMarathonSDSendGroupWithDockerContainerPortMappings(t *testing.T) {
-	var (
-		client = func(_ context.Context, _ *http.Client, _ string) (*appList, error) {
-			return marathonTestAppListWithDockerContainerPortMappings(marathonValidLabel, 1), nil
-		}
-	)
+	client := func(_ context.Context, _ *http.Client, _ string) (*appList, error) {
+		return marathonTestAppListWithDockerContainerPortMappings(marathonValidLabel, 1), nil
+	}
 	tgs, err := testUpdateServices(client)
 	if err != nil {
 		t.Fatalf("Got error: %s", err)
@@ -702,11 +684,9 @@ func marathonTestAppListWithContainerNetworkAndPortMappings(labels map[string]st
 }
 
 func TestMarathonSDSendGroupWithContainerNetworkAndPortMapping(t *testing.T) {
-	var (
-		client = func(_ context.Context, _ *http.Client, _ string) (*appList, error) {
-			return marathonTestAppListWithContainerNetworkAndPortMappings(marathonValidLabel, 1), nil
-		}
-	)
+	client := func(_ context.Context, _ *http.Client, _ string) (*appList, error) {
+		return marathonTestAppListWithContainerNetworkAndPortMappings(marathonValidLabel, 1), nil
+	}
 	tgs, err := testUpdateServices(client)
 	if err != nil {
 		t.Fatalf("Got error: %s", err)
diff --git a/discovery/openstack/hypervisor.go b/discovery/openstack/hypervisor.go
index 594f4e433..877b3eb9b 100644
--- a/discovery/openstack/hypervisor.go
+++ b/discovery/openstack/hypervisor.go
@@ -51,8 +51,10 @@ type HypervisorDiscovery struct {
 
 // newHypervisorDiscovery returns a new hypervisor discovery.
 func newHypervisorDiscovery(provider *gophercloud.ProviderClient, opts *gophercloud.AuthOptions, port int, region string, availability gophercloud.Availability, l log.Logger) *HypervisorDiscovery {
-	return &HypervisorDiscovery{provider: provider, authOpts: opts,
-		region: region, port: port, availability: availability, logger: l}
+	return &HypervisorDiscovery{
+		provider: provider, authOpts: opts,
+		region: region, port: port, availability: availability, logger: l,
+	}
 }
 
 func (h *HypervisorDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
diff --git a/discovery/openstack/hypervisor_test.go b/discovery/openstack/hypervisor_test.go
index 7556a88f3..396d5283d 100644
--- a/discovery/openstack/hypervisor_test.go
+++ b/discovery/openstack/hypervisor_test.go
@@ -47,7 +47,6 @@ func (s *OpenstackSDHypervisorTestSuite) openstackAuthSuccess() (refresher, erro
 }
 
 func TestOpenstackSDHypervisorRefresh(t *testing.T) {
-
 	mock := &OpenstackSDHypervisorTestSuite{}
 	mock.SetupTest(t)
diff --git a/discovery/openstack/instance.go b/discovery/openstack/instance.go
index ab2221f4f..fa4039bea 100644
--- a/discovery/openstack/instance.go
+++ b/discovery/openstack/instance.go
@@ -63,8 +63,10 @@ func newInstanceDiscovery(provider *gophercloud.ProviderClient, opts *gopherclou
 	if l == nil {
 		l = log.NewNopLogger()
 	}
-	return &InstanceDiscovery{provider: provider, authOpts: opts,
-		region: region, port: port, allTenants: allTenants, availability: availability, logger: l}
+	return &InstanceDiscovery{
+		provider: provider, authOpts: opts,
+		region: region, port: port, allTenants: allTenants, availability: availability, logger: l,
+	}
 }
 
 type floatingIPKey struct {
diff --git a/discovery/openstack/instance_test.go b/discovery/openstack/instance_test.go
index 8275727a6..d47cb0020 100644
--- a/discovery/openstack/instance_test.go
+++ b/discovery/openstack/instance_test.go
@@ -51,7 +51,6 @@ func (s *OpenstackSDInstanceTestSuite) openstackAuthSuccess() (refresher, error)
 }
 
 func TestOpenstackSDInstanceRefresh(t *testing.T) {
-
 	mock := &OpenstackSDInstanceTestSuite{}
 	mock.SetupTest(t)
diff --git a/discovery/openstack/mock_test.go b/discovery/openstack/mock_test.go
index f29d647d4..e64c33648 100644
--- a/discovery/openstack/mock_test.go
+++ b/discovery/openstack/mock_test.go
@@ -54,7 +54,7 @@ func testMethod(t *testing.T, r *http.Request, expected string) {
 	}
 }
 
-func testHeader(t *testing.T, r *http.Request, header string, expected string) {
+func testHeader(t *testing.T, r *http.Request, header, expected string) {
 	if actual := r.Header.Get(header); expected != actual {
 		t.Errorf("Header %s = %s, expected %s", header, actual, expected)
 	}
diff --git a/discovery/openstack/openstack.go b/discovery/openstack/openstack.go
index 2a341976e..932a4f5c2 100644
--- a/discovery/openstack/openstack.go
+++ b/discovery/openstack/openstack.go
@@ -145,7 +145,6 @@ func NewDiscovery(conf *SDConfig, l log.Logger) (*refresh.Discovery, error) {
 		time.Duration(conf.RefreshInterval),
 		r.refresh,
 	), nil
-
 }
 
 func newRefresher(conf *SDConfig, l log.Logger) (refresher, error) {
diff --git a/discovery/scaleway/scaleway.go b/discovery/scaleway/scaleway.go
index c8689cb94..920f087a1 100644
--- a/discovery/scaleway/scaleway.go
+++ b/discovery/scaleway/scaleway.go
@@ -173,8 +173,7 @@ func init() {
 
 // Discovery periodically performs Scaleway requests. It implements
 // the Discoverer interface.
-type Discovery struct {
-}
+type Discovery struct{}
 
 func NewDiscovery(conf *SDConfig, logger log.Logger) (*refresh.Discovery, error) {
 	r, err := newRefresher(conf)
diff --git a/discovery/targetgroup/targetgroup_test.go b/discovery/targetgroup/targetgroup_test.go
index bf0d99553..fe9587eb8 100644
--- a/discovery/targetgroup/targetgroup_test.go
+++ b/discovery/targetgroup/targetgroup_test.go
@@ -38,7 +38,8 @@ func TestTargetGroupStrictJsonUnmarshal(t *testing.T) {
 			expectedReply: nil,
 			expectedGroup: Group{Targets: []model.LabelSet{
 				{"__address__": "localhost:9090"},
-				{"__address__": "localhost:9091"}}, Labels: model.LabelSet{"my": "label"}},
+				{"__address__": "localhost:9091"},
+			}, Labels: model.LabelSet{"my": "label"}},
 		},
 		{
 			json:          `	{"label": {},"targets": []}`,
@@ -56,7 +57,6 @@ func TestTargetGroupStrictJsonUnmarshal(t *testing.T) {
 		require.Equal(t, test.expectedReply, actual)
 		require.Equal(t, test.expectedGroup, tg)
 	}
-
 }
 
 func TestTargetGroupYamlMarshal(t *testing.T) {
@@ -81,10 +81,13 @@ func TestTargetGroupYamlMarshal(t *testing.T) {
 		},
 		{
 			// targets only exposes addresses.
-			group: Group{Targets: []model.LabelSet{
-				{"__address__": "localhost:9090"},
-				{"__address__": "localhost:9091"}},
-				Labels: model.LabelSet{"foo": "bar", "bar": "baz"}},
+			group: Group{
+				Targets: []model.LabelSet{
+					{"__address__": "localhost:9090"},
+					{"__address__": "localhost:9091"},
+				},
+				Labels: model.LabelSet{"foo": "bar", "bar": "baz"},
+			},
 			expectedYaml: "targets:\n- localhost:9090\n- localhost:9091\nlabels:\n  bar: baz\n  foo: bar\n",
 			expectedErr:  nil,
 		},
@@ -120,7 +123,8 @@ func TestTargetGroupYamlUnmarshal(t *testing.T) {
 			expectedReply: nil,
 			expectedGroup: Group{Targets: []model.LabelSet{
 				{"__address__": "localhost:9090"},
-				{"__address__": "localhost:9191"}}, Labels: model.LabelSet{"my": "label"}},
+				{"__address__": "localhost:9191"},
+			}, Labels: model.LabelSet{"my": "label"}},
 		},
 		{
 			// incorrect syntax.
@@ -135,21 +139,25 @@ func TestTargetGroupYamlUnmarshal(t *testing.T) {
 		require.Equal(t, test.expectedReply, actual)
 		require.Equal(t, test.expectedGroup, tg)
 	}
-
 }
 
 func TestString(t *testing.T) {
 	// String() should return only the source, regardless of other attributes.
 	group1 :=
-		Group{Targets: []model.LabelSet{
-			{"__address__": "localhost:9090"},
-			{"__address__": "localhost:9091"}},
+		Group{
+			Targets: []model.LabelSet{
+				{"__address__": "localhost:9090"},
+				{"__address__": "localhost:9091"},
+			},
 			Source: "",
-			Labels: model.LabelSet{"foo": "bar", "bar": "baz"}}
+			Labels: model.LabelSet{"foo": "bar", "bar": "baz"},
+		}
 	group2 :=
-		Group{Targets: []model.LabelSet{},
-			Source: "",
-			Labels: model.LabelSet{}}
+		Group{
+			Targets: []model.LabelSet{},
+			Source: "",
+			Labels: model.LabelSet{},
+		}
 	require.Equal(t, "", group1.String())
 	require.Equal(t, "", group2.String())
 	require.Equal(t, group1.String(), group2.String())
diff --git a/discovery/triton/triton.go b/discovery/triton/triton.go
index 96b9f39b1..66efd9bbc 100644
--- a/discovery/triton/triton.go
+++ b/discovery/triton/triton.go
@@ -190,7 +190,7 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
 	default:
 		return nil, fmt.Errorf("unknown role '%s' in configuration", d.sdConfig.Role)
 	}
-	var endpoint = fmt.Sprintf(endpointFormat, d.sdConfig.Endpoint, d.sdConfig.Port, d.sdConfig.Version)
+	endpoint := fmt.Sprintf(endpointFormat, d.sdConfig.Endpoint, d.sdConfig.Port, d.sdConfig.Version)
 	if len(d.sdConfig.Groups) > 0 {
 		groups := url.QueryEscape(strings.Join(d.sdConfig.Groups, ","))
 		endpoint = fmt.Sprintf("%s?groups=%s", endpoint, groups)
diff --git a/discovery/triton/triton_test.go b/discovery/triton/triton_test.go
index f83ffcc97..ca3896532 100644
--- a/discovery/triton/triton_test.go
+++ b/discovery/triton/triton_test.go
@@ -135,8 +135,7 @@ func TestTritonSDRefreshNoTargets(t *testing.T) {
 }
 
 func TestTritonSDRefreshMultipleTargets(t *testing.T) {
-	var (
-		dstr = `{"containers":[
+	dstr := `{"containers":[
 	{
 		"groups":["foo","bar","baz"],
 		"server_uuid":"44454c4c-5000-104d-8037-b7c04f5a5131",
@@ -153,7 +152,6 @@ func TestTritonSDRefreshMultipleTargets(t *testing.T) {
 		"vm_uuid":"7b27a514-89d7-11e6-bee6-3f96f367bee7"
 	}]
}`
-	)
 
 	tgts := testTritonSDRefresh(t, conf, dstr)
 	require.NotNil(t, tgts)
@@ -161,9 +159,7 @@ func TestTritonSDRefreshMultipleTargets(t *testing.T) {
 }
 
 func TestTritonSDRefreshNoServer(t *testing.T) {
-	var (
-		td, _ = newTritonDiscovery(conf)
-	)
+	td, _ := newTritonDiscovery(conf)
 
 	_, err := td.refresh(context.Background())
 	require.Error(t, err)
@@ -171,9 +167,7 @@ func TestTritonSDRefreshCancelled(t *testing.T) {
-	var (
-		td, _ = newTritonDiscovery(conf)
-	)
+	td, _ := newTritonDiscovery(conf)
 
 	ctx, cancel := context.WithCancel(context.Background())
 	cancel()
@@ -183,8 +177,7 @@ func TestTritonSDRefreshCancelled(t *testing.T) {
 }
 
 func TestTritonSDRefreshCNsUUIDOnly(t *testing.T) {
-	var (
-		dstr = `{"cns":[
+	dstr := `{"cns":[
 	{
 		"server_uuid":"44454c4c-5000-104d-8037-b7c04f5a5131"
 	},
 	{
 		"server_uuid":"a5894692-bd32-4ca1-908a-e2dda3c3a5e6"
 	}]
}`
-	)
 
 	tgts := testTritonSDRefresh(t, cnconf, dstr)
 	require.NotNil(t, tgts)
@@ -200,8 +192,7 @@ func TestTritonSDRefreshCNsUUIDOnly(t *testing.T) {
 }
 
 func TestTritonSDRefreshCNsWithHostname(t *testing.T) {
-	var (
-		dstr = `{"cns":[
+	dstr := `{"cns":[
 	{
 		"server_uuid":"44454c4c-5000-104d-8037-b7c04f5a5131",
 		"server_hostname": "server01"
 	},
 	{
 		"server_uuid":"a5894692-bd32-4ca1-908a-e2dda3c3a5e6",
 		"server_hostname": "server02"
 	}]
}`
-	)
 
 	tgts := testTritonSDRefresh(t, cnconf, dstr)
 	require.NotNil(t, tgts)
diff --git a/discovery/uyuni/uyuni.go b/discovery/uyuni/uyuni.go
a6b2b6fce..df8efeeaf 100644 --- a/discovery/uyuni/uyuni.go +++ b/discovery/uyuni/uyuni.go @@ -119,7 +119,6 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { *c = DefaultSDConfig type plain SDConfig err := unmarshal((*plain)(c)) - if err != nil { return err } @@ -141,7 +140,7 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { return nil } -func login(rpcclient *xmlrpc.Client, user string, pass string) (string, error) { +func login(rpcclient *xmlrpc.Client, user, pass string) (string, error) { var result string err := rpcclient.Call("auth.login", []interface{}{user, pass}, &result) return result, err @@ -151,7 +150,7 @@ func logout(rpcclient *xmlrpc.Client, token string) error { return rpcclient.Call("auth.logout", token, nil) } -func getSystemGroupsInfoOfMonitoredClients(rpcclient *xmlrpc.Client, token string, entitlement string) (map[int][]systemGroupID, error) { +func getSystemGroupsInfoOfMonitoredClients(rpcclient *xmlrpc.Client, token, entitlement string) (map[int][]systemGroupID, error) { var systemGroupsInfos []struct { SystemID int `xmlrpc:"id"` SystemGroups []systemGroupID `xmlrpc:"system_groups"` @@ -234,7 +233,6 @@ func (d *Discovery) getEndpointLabels( systemGroupIDs []systemGroupID, networkInfo networkInfo, ) model.LabelSet { - var addr, scheme string managedGroupNames := getSystemGroupNames(systemGroupIDs) addr = fmt.Sprintf("%s:%d", networkInfo.Hostname, endpoint.Port) @@ -274,7 +272,6 @@ func (d *Discovery) getTargetsForSystems( token string, entitlement string, ) ([]model.LabelSet, error) { - result := make([]model.LabelSet, 0) systemGroupIDsBySystemID, err := getSystemGroupsInfoOfMonitoredClients(rpcClient, token, entitlement) diff --git a/discovery/xds/client_test.go b/discovery/xds/client_test.go index 1c0e321d3..fda7ba320 100644 --- a/discovery/xds/client_test.go +++ b/discovery/xds/client_test.go @@ -26,22 +26,19 @@ import ( "google.golang.org/protobuf/types/known/anypb" ) -var ( - httpResourceConf = &HTTPResourceClientConfig{ - HTTPClientConfig: config.HTTPClientConfig{ - TLSConfig: config.TLSConfig{InsecureSkipVerify: true}, - }, - ResourceType: "monitoring", - // Some known type. - ResourceTypeURL: "type.googleapis.com/envoy.service.discovery.v3.DiscoveryRequest", - Server: "http://localhost", - ClientID: "test-id", - } -) +var httpResourceConf = &HTTPResourceClientConfig{ + HTTPClientConfig: config.HTTPClientConfig{ + TLSConfig: config.TLSConfig{InsecureSkipVerify: true}, + }, + ResourceType: "monitoring", + // Some known type. 
+ ResourceTypeURL: "type.googleapis.com/envoy.service.discovery.v3.DiscoveryRequest", + Server: "http://localhost", + ClientID: "test-id", +} func urlMustParse(str string) *url.URL { parsed, err := url.Parse(str) - if err != nil { panic(err) } @@ -92,7 +89,6 @@ func TestCreateNewHTTPResourceClient(t *testing.T) { require.Equal(t, client.endpoint, "http://127.0.0.1:5000/v3/discovery:monitoring?param1=v1") require.Equal(t, client.client.Timeout, 1*time.Minute) - } func createTestHTTPResourceClient(t *testing.T, conf *HTTPResourceClientConfig, protocolVersion ProtocolVersion, responder discoveryResponder) (*HTTPResourceClient, func()) { diff --git a/discovery/xds/kuma_test.go b/discovery/xds/kuma_test.go index 0de1b986d..b62e9a0ef 100644 --- a/discovery/xds/kuma_test.go +++ b/discovery/xds/kuma_test.go @@ -91,7 +91,6 @@ func getKumaMadsV1DiscoveryResponse(resources ...*MonitoringAssignment) (*v3.Dis serialized := make([]*anypb.Any, len(resources)) for i, res := range resources { data, err := proto.Marshal(res) - if err != nil { return nil, err } diff --git a/documentation/examples/custom-sd/adapter-usage/main.go b/documentation/examples/custom-sd/adapter-usage/main.go index 2214a823d..80a8f1b5e 100644 --- a/documentation/examples/custom-sd/adapter-usage/main.go +++ b/documentation/examples/custom-sd/adapter-usage/main.go @@ -50,7 +50,7 @@ var ( tagsLabel = model.MetaLabelPrefix + "consul_tags" // serviceAddressLabel is the name of the label containing the (optional) service address. serviceAddressLabel = model.MetaLabelPrefix + "consul_service_address" - //servicePortLabel is the name of the label containing the service port. + // servicePortLabel is the name of the label containing the service port. servicePortLabel = model.MetaLabelPrefix + "consul_service_port" // serviceIDLabel is the name of the label containing the service ID. serviceIDLabel = model.MetaLabelPrefix + "consul_service_id" @@ -120,7 +120,7 @@ func (d *discovery) parseServiceNodes(resp *http.Response, name string) (*target for _, node := range nodes { // We surround the separated list with the separator as well. This way regular expressions // in relabeling rules don't have to consider tag positions. - var tags = "," + strings.Join(node.ServiceTags, ",") + "," + tags := "," + strings.Join(node.ServiceTags, ",") + "," // If the service address is not empty it should be used instead of the node address // since the service may be registered remotely through a different node. @@ -162,7 +162,6 @@ func (d *discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { for c := time.Tick(time.Duration(d.refreshInterval) * time.Second); ; { var srvs map[string][]string resp, err := http.Get(fmt.Sprintf("http://%s/v1/catalog/services", d.address)) - if err != nil { level.Error(d.logger).Log("msg", "Error getting services list", "err", err) time.Sleep(time.Duration(d.refreshInterval) * time.Second) diff --git a/documentation/examples/custom-sd/adapter/adapter.go b/documentation/examples/custom-sd/adapter/adapter.go index e355e7b71..564a4e83b 100644 --- a/documentation/examples/custom-sd/adapter/adapter.go +++ b/documentation/examples/custom-sd/adapter/adapter.go @@ -163,7 +163,7 @@ func (a *Adapter) Run() { } // NewAdapter creates a new instance of Adapter. 
-func NewAdapter(ctx context.Context, file string, name string, d discovery.Discoverer, logger log.Logger) *Adapter { +func NewAdapter(ctx context.Context, file, name string, d discovery.Discoverer, logger log.Logger) *Adapter { return &Adapter{ ctx: ctx, disc: d, diff --git a/documentation/examples/remote_storage/remote_storage_adapter/graphite/client.go b/documentation/examples/remote_storage/remote_storage_adapter/graphite/client.go index fedea48db..36242a8f4 100644 --- a/documentation/examples/remote_storage/remote_storage_adapter/graphite/client.go +++ b/documentation/examples/remote_storage/remote_storage_adapter/graphite/client.go @@ -37,7 +37,7 @@ type Client struct { } // NewClient creates a new Client. -func NewClient(logger log.Logger, address string, transport string, timeout time.Duration, prefix string) *Client { +func NewClient(logger log.Logger, address, transport string, timeout time.Duration, prefix string) *Client { if logger == nil { logger = log.NewNopLogger() } diff --git a/documentation/examples/remote_storage/remote_storage_adapter/graphite/client_test.go b/documentation/examples/remote_storage/remote_storage_adapter/graphite/client_test.go index 68844d327..535027e07 100644 --- a/documentation/examples/remote_storage/remote_storage_adapter/graphite/client_test.go +++ b/documentation/examples/remote_storage/remote_storage_adapter/graphite/client_test.go @@ -20,13 +20,11 @@ import ( "github.com/stretchr/testify/require" ) -var ( - metric = model.Metric{ - model.MetricNameLabel: "test:metric", - "testlabel": "test:value", - "many_chars": "abc!ABC:012-3!45ö67~89./(){},=.\"\\", - } -) +var metric = model.Metric{ + model.MetricNameLabel: "test:metric", + "testlabel": "test:value", + "many_chars": "abc!ABC:012-3!45ö67~89./(){},=.\"\\", +} func TestEscape(t *testing.T) { // Can we correctly keep and escape valid chars. diff --git a/documentation/examples/remote_storage/remote_storage_adapter/influxdb/client.go b/documentation/examples/remote_storage/remote_storage_adapter/influxdb/client.go index 061c2e76d..f0ee58c5a 100644 --- a/documentation/examples/remote_storage/remote_storage_adapter/influxdb/client.go +++ b/documentation/examples/remote_storage/remote_storage_adapter/influxdb/client.go @@ -41,7 +41,7 @@ type Client struct { } // NewClient creates a new Client. -func NewClient(logger log.Logger, conf influx.HTTPConfig, db string, rp string) *Client { +func NewClient(logger log.Logger, conf influx.HTTPConfig, db, rp string) *Client { c, err := influx.NewHTTPClient(conf) // Currently influx.NewClient() *should* never return an error. 
if err != nil { diff --git a/documentation/examples/remote_storage/remote_storage_adapter/opentsdb/client_test.go b/documentation/examples/remote_storage/remote_storage_adapter/opentsdb/client_test.go index 51906efce..a30448e76 100644 --- a/documentation/examples/remote_storage/remote_storage_adapter/opentsdb/client_test.go +++ b/documentation/examples/remote_storage/remote_storage_adapter/opentsdb/client_test.go @@ -21,13 +21,11 @@ import ( "github.com/stretchr/testify/require" ) -var ( - metric = model.Metric{ - model.MetricNameLabel: "test:metric", - "testlabel": "test:value", - "many_chars": "abc!ABC:012-3!45ö67~89./", - } -) +var metric = model.Metric{ + model.MetricNameLabel: "test:metric", + "testlabel": "test:value", + "many_chars": "abc!ABC:012-3!45ö67~89./", +} func TestTagsFromMetric(t *testing.T) { expected := map[string]TagValue{ diff --git a/notifier/notifier.go b/notifier/notifier.go index 19a7f0bf9..d378beb60 100644 --- a/notifier/notifier.go +++ b/notifier/notifier.go @@ -303,7 +303,6 @@ func (n *Manager) nextBatch() []*Alert { // Run dispatches notifications continuously. func (n *Manager) Run(tsets <-chan map[string][]*targetgroup.Group) { - for { select { case <-n.ctx.Done(): diff --git a/notifier/notifier_test.go b/notifier/notifier_test.go index b04b07e94..807c6bc7f 100644 --- a/notifier/notifier_test.go +++ b/notifier/notifier_test.go @@ -40,7 +40,7 @@ import ( ) func TestPostPath(t *testing.T) { - var cases = []struct { + cases := []struct { in, out string }{ { @@ -456,7 +456,7 @@ func TestLabelSetNotReused(t *testing.T) { } func TestReload(t *testing.T) { - var tests = []struct { + tests := []struct { in *targetgroup.Group out string }{ @@ -500,11 +500,10 @@ alerting: require.Equal(t, tt.out, res) } - } func TestDroppedAlertmanagers(t *testing.T) { - var tests = []struct { + tests := []struct { in *targetgroup.Group out string }{ diff --git a/pkg/labels/labels_test.go b/pkg/labels/labels_test.go index 57f28224e..2d5c2ed37 100644 --- a/pkg/labels/labels_test.go +++ b/pkg/labels/labels_test.go @@ -704,7 +704,7 @@ func BenchmarkLabels_Hash(b *testing.B) { lbls: func() Labels { lbls := make(Labels, 10) for i := 0; i < len(lbls); i++ { - //Label ~50B name, 50B value. + // Label ~50B name, 50B value. lbls[i] = Label{Name: fmt.Sprintf("abcdefghijabcdefghijabcdefghijabcdefghijabcdefghij%d", i), Value: fmt.Sprintf("abcdefghijabcdefghijabcdefghijabcdefghijabcdefghij%d", i)} } return lbls diff --git a/pkg/logging/file.go b/pkg/logging/file.go index 3c0c3e3b0..6b5751b01 100644 --- a/pkg/logging/file.go +++ b/pkg/logging/file.go @@ -21,11 +21,9 @@ import ( "github.com/pkg/errors" ) -var ( - timestampFormat = log.TimestampFormat( - func() time.Time { return time.Now().UTC() }, - "2006-01-02T15:04:05.000Z07:00", - ) +var timestampFormat = log.TimestampFormat( + func() time.Time { return time.Now().UTC() }, + "2006-01-02T15:04:05.000Z07:00", ) // JSONFileLogger represents a logger that writes JSON to a file. 
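Note: the pkg/logging/file.go hunks above and below illustrate two of the mechanical rewrites this 'gofumpt -w -s -extra' run performs: a parenthesized declaration group containing a single spec is collapsed into a plain declaration, and the C-style octal permission literal 0666 is rewritten in the explicit 0o666 form. A minimal, self-contained sketch of both shapes follows; logPath and openLog are illustrative names only, not part of this patch.

    package main

    import (
    	"fmt"
    	"os"
    )

    // Before this gofumpt run, the declaration below would have been written
    // as a one-element group:
    //
    //	var (
    //		logPath = "/tmp/app.log"
    //	)
    //
    // Single-spec groups are collapsed to a plain declaration.
    var logPath = "/tmp/app.log"

    // openLog opens the log file for appending. The permission argument uses
    // the 0o prefix that this run rewrites old-style 0666 literals into.
    func openLog() (*os.File, error) {
    	return os.OpenFile(logPath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0o666)
    }

    func main() {
    	f, err := openLog()
    	if err != nil {
    		fmt.Println("open failed:", err)
    		return
    	}
    	defer f.Close()
    	fmt.Println("logging to", logPath)
    }

The 0o form carries the same value as 0666; it is preferred because a bare leading zero is easy to misread as a decimal literal.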
@@ -40,7 +38,7 @@ func NewJSONFileLogger(s string) (*JSONFileLogger, error) { return nil, nil } - f, err := os.OpenFile(s, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0666) + f, err := os.OpenFile(s, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0o666) if err != nil { return nil, errors.Wrap(err, "can't create json logger") } diff --git a/pkg/runtime/statfs_default.go b/pkg/runtime/statfs_default.go index a493a5cbb..f850f2cd6 100644 --- a/pkg/runtime/statfs_default.go +++ b/pkg/runtime/statfs_default.go @@ -23,7 +23,6 @@ import ( // Statfs returns the file system type (Unix only) func Statfs(path string) string { - // Types of file systems that may be returned by `statfs` fsTypes := map[int64]string{ 0xadf5: "ADFS_SUPER_MAGIC", diff --git a/pkg/runtime/statfs_linux_386.go b/pkg/runtime/statfs_linux_386.go index b45eecdd3..7494a0adf 100644 --- a/pkg/runtime/statfs_linux_386.go +++ b/pkg/runtime/statfs_linux_386.go @@ -23,7 +23,6 @@ import ( // Statfs returns the file system type (Unix only) func Statfs(path string) string { - // Types of file systems that may be returned by `statfs` fsTypes := map[int32]string{ 0xadf5: "ADFS_SUPER_MAGIC", diff --git a/pkg/runtime/statfs_uint32.go b/pkg/runtime/statfs_uint32.go index fa10ebc96..72d670a39 100644 --- a/pkg/runtime/statfs_uint32.go +++ b/pkg/runtime/statfs_uint32.go @@ -23,7 +23,6 @@ import ( // Statfs returns the file system type (Unix only) func Statfs(path string) string { - // Types of file systems that may be returned by `statfs` fsTypes := map[uint32]string{ 0xadf5: "ADFS_SUPER_MAGIC", diff --git a/pkg/textparse/promparse_test.go b/pkg/textparse/promparse_test.go index 1676ccc13..653f0dd05 100644 --- a/pkg/textparse/promparse_test.go +++ b/pkg/textparse/promparse_test.go @@ -457,9 +457,7 @@ func BenchmarkParse(b *testing.B) { total := 0 for i := 0; i < b.N; i += promtestdataSampleCount { - var ( - decSamples = make(model.Vector, 0, 50) - ) + decSamples := make(model.Vector, 0, 50) sdec := expfmt.SampleDecoder{ Dec: expfmt.NewDecoder(bytes.NewReader(buf), expfmt.FmtText), Opts: &expfmt.DecodeOptions{ @@ -480,6 +478,7 @@ func BenchmarkParse(b *testing.B) { } } } + func BenchmarkGzip(b *testing.B) { for _, fn := range []string{"promtestdata.txt", "promtestdata.nometa.txt"} { b.Run(fn, func(b *testing.B) { diff --git a/promql/engine.go b/promql/engine.go index bfa575cac..5be7ef3ee 100644 --- a/promql/engine.go +++ b/promql/engine.go @@ -87,12 +87,15 @@ type ( func (e ErrQueryTimeout) Error() string { return fmt.Sprintf("query timed out in %s", string(e)) } + func (e ErrQueryCanceled) Error() string { return fmt.Sprintf("query was canceled in %s", string(e)) } + func (e ErrTooManySamples) Error() string { return fmt.Sprintf("query processing would load too many samples into memory in %s", string(e)) } + func (e ErrStorage) Error() string { return e.Err.Error() } @@ -402,8 +405,10 @@ func (ng *Engine) newQuery(q storage.Queryable, expr parser.Expr, start, end tim return qry, nil } -var ErrValidationAtModifierDisabled = errors.New("@ modifier is disabled") -var ErrValidationNegativeOffsetDisabled = errors.New("negative offset is disabled") +var ( + ErrValidationAtModifierDisabled = errors.New("@ modifier is disabled") + ErrValidationNegativeOffsetDisabled = errors.New("negative offset is disabled") +) func (ng *Engine) validateOpts(expr parser.Expr) error { if ng.enableAtModifier && ng.enableNegativeOffset { @@ -2139,7 +2144,6 @@ type groupedAggregation struct { // aggregation evaluates an aggregation operation on a Vector. 
The provided grouping labels // must be sorted. func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without bool, param interface{}, vec Vector, seriesHelper []EvalSeriesHelper, enh *EvalNodeHelper) Vector { - result := map[uint64]*groupedAggregation{} orderedResult := []*groupedAggregation{} var k int64 @@ -2509,7 +2513,6 @@ func preprocessExprHelper(expr parser.Expr, start, end time.Time) bool { } if isStepInvariant { - // The function and all arguments are step invariant. return true } @@ -2559,7 +2562,6 @@ func newStepInvariantExpr(expr parser.Expr) parser.Expr { // Wrapping the inside of () makes it easy to unwrap the paren later. // But this effectively unwraps the paren. return newStepInvariantExpr(e.Expr) - } return &parser.StepInvariantExpr{Expr: expr} } diff --git a/promql/engine_test.go b/promql/engine_test.go index 7ac6ae1d2..0986a22e5 100644 --- a/promql/engine_test.go +++ b/promql/engine_test.go @@ -181,9 +181,11 @@ type errQuerier struct { func (q *errQuerier) Select(bool, *storage.SelectHints, ...*labels.Matcher) storage.SeriesSet { return errSeriesSet{err: q.err} } + func (*errQuerier) LabelValues(string, ...*labels.Matcher) ([]string, storage.Warnings, error) { return nil, nil, nil } + func (*errQuerier) LabelNames(...*labels.Matcher) ([]string, storage.Warnings, error) { return nil, nil, nil } @@ -267,277 +269,278 @@ func TestSelectHintsSetCorrectly(t *testing.T) { // TODO(bwplotka): Add support for better hints when subquerying. expected []*storage.SelectHints - }{{ - query: "foo", start: 10000, - expected: []*storage.SelectHints{ - {Start: 5000, End: 10000}, - }, - }, { - query: "foo @ 15", start: 10000, - expected: []*storage.SelectHints{ - {Start: 10000, End: 15000}, - }, - }, { - query: "foo @ 1", start: 10000, - expected: []*storage.SelectHints{ - {Start: -4000, End: 1000}, - }, - }, { - query: "foo[2m]", start: 200000, - expected: []*storage.SelectHints{ - {Start: 80000, End: 200000, Range: 120000}, - }, - }, { - query: "foo[2m] @ 180", start: 200000, - expected: []*storage.SelectHints{ - {Start: 60000, End: 180000, Range: 120000}, - }, - }, { - query: "foo[2m] @ 300", start: 200000, - expected: []*storage.SelectHints{ - {Start: 180000, End: 300000, Range: 120000}, - }, - }, { - query: "foo[2m] @ 60", start: 200000, - expected: []*storage.SelectHints{ - {Start: -60000, End: 60000, Range: 120000}, - }, - }, { - query: "foo[2m] offset 2m", start: 300000, - expected: []*storage.SelectHints{ - {Start: 60000, End: 180000, Range: 120000}, - }, - }, { - query: "foo[2m] @ 200 offset 2m", start: 300000, - expected: []*storage.SelectHints{ - {Start: -40000, End: 80000, Range: 120000}, - }, - }, { - query: "foo[2m:1s]", start: 300000, - expected: []*storage.SelectHints{ - {Start: 175000, End: 300000}, - }, - }, { - query: "count_over_time(foo[2m:1s])", start: 300000, - expected: []*storage.SelectHints{ - {Start: 175000, End: 300000, Func: "count_over_time"}, - }, - }, { - query: "count_over_time(foo[2m:1s] @ 300)", start: 200000, - expected: []*storage.SelectHints{ - {Start: 175000, End: 300000, Func: "count_over_time"}, - }, - }, { - query: "count_over_time(foo[2m:1s] @ 200)", start: 200000, - expected: []*storage.SelectHints{ - {Start: 75000, End: 200000, Func: "count_over_time"}, - }, - }, { - query: "count_over_time(foo[2m:1s] @ 100)", start: 200000, - expected: []*storage.SelectHints{ - {Start: -25000, End: 100000, Func: "count_over_time"}, - }, - }, { - query: "count_over_time(foo[2m:1s] offset 10s)", start: 300000, - expected: 
[]*storage.SelectHints{ - {Start: 165000, End: 290000, Func: "count_over_time"}, - }, - }, { - query: "count_over_time((foo offset 10s)[2m:1s] offset 10s)", start: 300000, - expected: []*storage.SelectHints{ - {Start: 155000, End: 280000, Func: "count_over_time"}, - }, - }, { - // When the @ is on the vector selector, the enclosing subquery parameters - // don't affect the hint ranges. - query: "count_over_time((foo @ 200 offset 10s)[2m:1s] offset 10s)", start: 300000, - expected: []*storage.SelectHints{ - {Start: 185000, End: 190000, Func: "count_over_time"}, - }, - }, { - // When the @ is on the vector selector, the enclosing subquery parameters - // don't affect the hint ranges. - query: "count_over_time((foo @ 200 offset 10s)[2m:1s] @ 100 offset 10s)", start: 300000, - expected: []*storage.SelectHints{ - {Start: 185000, End: 190000, Func: "count_over_time"}, - }, - }, { - query: "count_over_time((foo offset 10s)[2m:1s] @ 100 offset 10s)", start: 300000, - expected: []*storage.SelectHints{ - {Start: -45000, End: 80000, Func: "count_over_time"}, - }, - }, { + }{ + { + query: "foo", start: 10000, + expected: []*storage.SelectHints{ + {Start: 5000, End: 10000}, + }, + }, { + query: "foo @ 15", start: 10000, + expected: []*storage.SelectHints{ + {Start: 10000, End: 15000}, + }, + }, { + query: "foo @ 1", start: 10000, + expected: []*storage.SelectHints{ + {Start: -4000, End: 1000}, + }, + }, { + query: "foo[2m]", start: 200000, + expected: []*storage.SelectHints{ + {Start: 80000, End: 200000, Range: 120000}, + }, + }, { + query: "foo[2m] @ 180", start: 200000, + expected: []*storage.SelectHints{ + {Start: 60000, End: 180000, Range: 120000}, + }, + }, { + query: "foo[2m] @ 300", start: 200000, + expected: []*storage.SelectHints{ + {Start: 180000, End: 300000, Range: 120000}, + }, + }, { + query: "foo[2m] @ 60", start: 200000, + expected: []*storage.SelectHints{ + {Start: -60000, End: 60000, Range: 120000}, + }, + }, { + query: "foo[2m] offset 2m", start: 300000, + expected: []*storage.SelectHints{ + {Start: 60000, End: 180000, Range: 120000}, + }, + }, { + query: "foo[2m] @ 200 offset 2m", start: 300000, + expected: []*storage.SelectHints{ + {Start: -40000, End: 80000, Range: 120000}, + }, + }, { + query: "foo[2m:1s]", start: 300000, + expected: []*storage.SelectHints{ + {Start: 175000, End: 300000}, + }, + }, { + query: "count_over_time(foo[2m:1s])", start: 300000, + expected: []*storage.SelectHints{ + {Start: 175000, End: 300000, Func: "count_over_time"}, + }, + }, { + query: "count_over_time(foo[2m:1s] @ 300)", start: 200000, + expected: []*storage.SelectHints{ + {Start: 175000, End: 300000, Func: "count_over_time"}, + }, + }, { + query: "count_over_time(foo[2m:1s] @ 200)", start: 200000, + expected: []*storage.SelectHints{ + {Start: 75000, End: 200000, Func: "count_over_time"}, + }, + }, { + query: "count_over_time(foo[2m:1s] @ 100)", start: 200000, + expected: []*storage.SelectHints{ + {Start: -25000, End: 100000, Func: "count_over_time"}, + }, + }, { + query: "count_over_time(foo[2m:1s] offset 10s)", start: 300000, + expected: []*storage.SelectHints{ + {Start: 165000, End: 290000, Func: "count_over_time"}, + }, + }, { + query: "count_over_time((foo offset 10s)[2m:1s] offset 10s)", start: 300000, + expected: []*storage.SelectHints{ + {Start: 155000, End: 280000, Func: "count_over_time"}, + }, + }, { + // When the @ is on the vector selector, the enclosing subquery parameters + // don't affect the hint ranges. 
+ query: "count_over_time((foo @ 200 offset 10s)[2m:1s] offset 10s)", start: 300000, + expected: []*storage.SelectHints{ + {Start: 185000, End: 190000, Func: "count_over_time"}, + }, + }, { + // When the @ is on the vector selector, the enclosing subquery parameters + // don't affect the hint ranges. + query: "count_over_time((foo @ 200 offset 10s)[2m:1s] @ 100 offset 10s)", start: 300000, + expected: []*storage.SelectHints{ + {Start: 185000, End: 190000, Func: "count_over_time"}, + }, + }, { + query: "count_over_time((foo offset 10s)[2m:1s] @ 100 offset 10s)", start: 300000, + expected: []*storage.SelectHints{ + {Start: -45000, End: 80000, Func: "count_over_time"}, + }, + }, { - query: "foo", start: 10000, end: 20000, - expected: []*storage.SelectHints{ - {Start: 5000, End: 20000, Step: 1000}, + query: "foo", start: 10000, end: 20000, + expected: []*storage.SelectHints{ + {Start: 5000, End: 20000, Step: 1000}, + }, + }, { + query: "foo @ 15", start: 10000, end: 20000, + expected: []*storage.SelectHints{ + {Start: 10000, End: 15000, Step: 1000}, + }, + }, { + query: "foo @ 1", start: 10000, end: 20000, + expected: []*storage.SelectHints{ + {Start: -4000, End: 1000, Step: 1000}, + }, + }, { + query: "rate(foo[2m] @ 180)", start: 200000, end: 500000, + expected: []*storage.SelectHints{ + {Start: 60000, End: 180000, Range: 120000, Func: "rate", Step: 1000}, + }, + }, { + query: "rate(foo[2m] @ 300)", start: 200000, end: 500000, + expected: []*storage.SelectHints{ + {Start: 180000, End: 300000, Range: 120000, Func: "rate", Step: 1000}, + }, + }, { + query: "rate(foo[2m] @ 60)", start: 200000, end: 500000, + expected: []*storage.SelectHints{ + {Start: -60000, End: 60000, Range: 120000, Func: "rate", Step: 1000}, + }, + }, { + query: "rate(foo[2m])", start: 200000, end: 500000, + expected: []*storage.SelectHints{ + {Start: 80000, End: 500000, Range: 120000, Func: "rate", Step: 1000}, + }, + }, { + query: "rate(foo[2m] offset 2m)", start: 300000, end: 500000, + expected: []*storage.SelectHints{ + {Start: 60000, End: 380000, Range: 120000, Func: "rate", Step: 1000}, + }, + }, { + query: "rate(foo[2m:1s])", start: 300000, end: 500000, + expected: []*storage.SelectHints{ + {Start: 175000, End: 500000, Func: "rate", Step: 1000}, + }, + }, { + query: "count_over_time(foo[2m:1s])", start: 300000, end: 500000, + expected: []*storage.SelectHints{ + {Start: 175000, End: 500000, Func: "count_over_time", Step: 1000}, + }, + }, { + query: "count_over_time(foo[2m:1s] offset 10s)", start: 300000, end: 500000, + expected: []*storage.SelectHints{ + {Start: 165000, End: 490000, Func: "count_over_time", Step: 1000}, + }, + }, { + query: "count_over_time(foo[2m:1s] @ 300)", start: 200000, end: 500000, + expected: []*storage.SelectHints{ + {Start: 175000, End: 300000, Func: "count_over_time", Step: 1000}, + }, + }, { + query: "count_over_time(foo[2m:1s] @ 200)", start: 200000, end: 500000, + expected: []*storage.SelectHints{ + {Start: 75000, End: 200000, Func: "count_over_time", Step: 1000}, + }, + }, { + query: "count_over_time(foo[2m:1s] @ 100)", start: 200000, end: 500000, + expected: []*storage.SelectHints{ + {Start: -25000, End: 100000, Func: "count_over_time", Step: 1000}, + }, + }, { + query: "count_over_time((foo offset 10s)[2m:1s] offset 10s)", start: 300000, end: 500000, + expected: []*storage.SelectHints{ + {Start: 155000, End: 480000, Func: "count_over_time", Step: 1000}, + }, + }, { + // When the @ is on the vector selector, the enclosing subquery parameters + // don't affect the hint ranges. 
+ query: "count_over_time((foo @ 200 offset 10s)[2m:1s] offset 10s)", start: 300000, end: 500000, + expected: []*storage.SelectHints{ + {Start: 185000, End: 190000, Func: "count_over_time", Step: 1000}, + }, + }, { + // When the @ is on the vector selector, the enclosing subquery parameters + // don't affect the hint ranges. + query: "count_over_time((foo @ 200 offset 10s)[2m:1s] @ 100 offset 10s)", start: 300000, end: 500000, + expected: []*storage.SelectHints{ + {Start: 185000, End: 190000, Func: "count_over_time", Step: 1000}, + }, + }, { + query: "count_over_time((foo offset 10s)[2m:1s] @ 100 offset 10s)", start: 300000, end: 500000, + expected: []*storage.SelectHints{ + {Start: -45000, End: 80000, Func: "count_over_time", Step: 1000}, + }, + }, { + query: "sum by (dim1) (foo)", start: 10000, + expected: []*storage.SelectHints{ + {Start: 5000, End: 10000, Func: "sum", By: true, Grouping: []string{"dim1"}}, + }, + }, { + query: "sum without (dim1) (foo)", start: 10000, + expected: []*storage.SelectHints{ + {Start: 5000, End: 10000, Func: "sum", Grouping: []string{"dim1"}}, + }, + }, { + query: "sum by (dim1) (avg_over_time(foo[1s]))", start: 10000, + expected: []*storage.SelectHints{ + {Start: 9000, End: 10000, Func: "avg_over_time", Range: 1000}, + }, + }, { + query: "sum by (dim1) (max by (dim2) (foo))", start: 10000, + expected: []*storage.SelectHints{ + {Start: 5000, End: 10000, Func: "max", By: true, Grouping: []string{"dim2"}}, + }, + }, { + query: "(max by (dim1) (foo))[5s:1s]", start: 10000, + expected: []*storage.SelectHints{ + {Start: 0, End: 10000, Func: "max", By: true, Grouping: []string{"dim1"}}, + }, + }, { + query: "(sum(http_requests{group=~\"p.*\"})+max(http_requests{group=~\"c.*\"}))[20s:5s]", start: 120000, + expected: []*storage.SelectHints{ + {Start: 95000, End: 120000, Func: "sum", By: true}, + {Start: 95000, End: 120000, Func: "max", By: true}, + }, + }, { + query: "foo @ 50 + bar @ 250 + baz @ 900", start: 100000, end: 500000, + expected: []*storage.SelectHints{ + {Start: 45000, End: 50000, Step: 1000}, + {Start: 245000, End: 250000, Step: 1000}, + {Start: 895000, End: 900000, Step: 1000}, + }, + }, { + query: "foo @ 50 + bar + baz @ 900", start: 100000, end: 500000, + expected: []*storage.SelectHints{ + {Start: 45000, End: 50000, Step: 1000}, + {Start: 95000, End: 500000, Step: 1000}, + {Start: 895000, End: 900000, Step: 1000}, + }, + }, { + query: "rate(foo[2s] @ 50) + bar @ 250 + baz @ 900", start: 100000, end: 500000, + expected: []*storage.SelectHints{ + {Start: 48000, End: 50000, Step: 1000, Func: "rate", Range: 2000}, + {Start: 245000, End: 250000, Step: 1000}, + {Start: 895000, End: 900000, Step: 1000}, + }, + }, { + query: "rate(foo[2s:1s] @ 50) + bar + baz", start: 100000, end: 500000, + expected: []*storage.SelectHints{ + {Start: 43000, End: 50000, Step: 1000, Func: "rate"}, + {Start: 95000, End: 500000, Step: 1000}, + {Start: 95000, End: 500000, Step: 1000}, + }, + }, { + query: "rate(foo[2s:1s] @ 50) + bar + rate(baz[2m:1s] @ 900 offset 2m) ", start: 100000, end: 500000, + expected: []*storage.SelectHints{ + {Start: 43000, End: 50000, Step: 1000, Func: "rate"}, + {Start: 95000, End: 500000, Step: 1000}, + {Start: 655000, End: 780000, Step: 1000, Func: "rate"}, + }, + }, { // Hints are based on the inner most subquery timestamp. 
+ query: `sum_over_time(sum_over_time(metric{job="1"}[100s])[100s:25s] @ 50)[3s:1s] @ 3000`, start: 100000, + expected: []*storage.SelectHints{ + {Start: -150000, End: 50000, Range: 100000, Func: "sum_over_time"}, + }, + }, { // Hints are based on the inner most subquery timestamp. + query: `sum_over_time(sum_over_time(metric{job="1"}[100s])[100s:25s] @ 3000)[3s:1s] @ 50`, + expected: []*storage.SelectHints{ + {Start: 2800000, End: 3000000, Range: 100000, Func: "sum_over_time"}, + }, }, - }, { - query: "foo @ 15", start: 10000, end: 20000, - expected: []*storage.SelectHints{ - {Start: 10000, End: 15000, Step: 1000}, - }, - }, { - query: "foo @ 1", start: 10000, end: 20000, - expected: []*storage.SelectHints{ - {Start: -4000, End: 1000, Step: 1000}, - }, - }, { - query: "rate(foo[2m] @ 180)", start: 200000, end: 500000, - expected: []*storage.SelectHints{ - {Start: 60000, End: 180000, Range: 120000, Func: "rate", Step: 1000}, - }, - }, { - query: "rate(foo[2m] @ 300)", start: 200000, end: 500000, - expected: []*storage.SelectHints{ - {Start: 180000, End: 300000, Range: 120000, Func: "rate", Step: 1000}, - }, - }, { - query: "rate(foo[2m] @ 60)", start: 200000, end: 500000, - expected: []*storage.SelectHints{ - {Start: -60000, End: 60000, Range: 120000, Func: "rate", Step: 1000}, - }, - }, { - query: "rate(foo[2m])", start: 200000, end: 500000, - expected: []*storage.SelectHints{ - {Start: 80000, End: 500000, Range: 120000, Func: "rate", Step: 1000}, - }, - }, { - query: "rate(foo[2m] offset 2m)", start: 300000, end: 500000, - expected: []*storage.SelectHints{ - {Start: 60000, End: 380000, Range: 120000, Func: "rate", Step: 1000}, - }, - }, { - query: "rate(foo[2m:1s])", start: 300000, end: 500000, - expected: []*storage.SelectHints{ - {Start: 175000, End: 500000, Func: "rate", Step: 1000}, - }, - }, { - query: "count_over_time(foo[2m:1s])", start: 300000, end: 500000, - expected: []*storage.SelectHints{ - {Start: 175000, End: 500000, Func: "count_over_time", Step: 1000}, - }, - }, { - query: "count_over_time(foo[2m:1s] offset 10s)", start: 300000, end: 500000, - expected: []*storage.SelectHints{ - {Start: 165000, End: 490000, Func: "count_over_time", Step: 1000}, - }, - }, { - query: "count_over_time(foo[2m:1s] @ 300)", start: 200000, end: 500000, - expected: []*storage.SelectHints{ - {Start: 175000, End: 300000, Func: "count_over_time", Step: 1000}, - }, - }, { - query: "count_over_time(foo[2m:1s] @ 200)", start: 200000, end: 500000, - expected: []*storage.SelectHints{ - {Start: 75000, End: 200000, Func: "count_over_time", Step: 1000}, - }, - }, { - query: "count_over_time(foo[2m:1s] @ 100)", start: 200000, end: 500000, - expected: []*storage.SelectHints{ - {Start: -25000, End: 100000, Func: "count_over_time", Step: 1000}, - }, - }, { - query: "count_over_time((foo offset 10s)[2m:1s] offset 10s)", start: 300000, end: 500000, - expected: []*storage.SelectHints{ - {Start: 155000, End: 480000, Func: "count_over_time", Step: 1000}, - }, - }, { - // When the @ is on the vector selector, the enclosing subquery parameters - // don't affect the hint ranges. - query: "count_over_time((foo @ 200 offset 10s)[2m:1s] offset 10s)", start: 300000, end: 500000, - expected: []*storage.SelectHints{ - {Start: 185000, End: 190000, Func: "count_over_time", Step: 1000}, - }, - }, { - // When the @ is on the vector selector, the enclosing subquery parameters - // don't affect the hint ranges. 
- query: "count_over_time((foo @ 200 offset 10s)[2m:1s] @ 100 offset 10s)", start: 300000, end: 500000, - expected: []*storage.SelectHints{ - {Start: 185000, End: 190000, Func: "count_over_time", Step: 1000}, - }, - }, { - query: "count_over_time((foo offset 10s)[2m:1s] @ 100 offset 10s)", start: 300000, end: 500000, - expected: []*storage.SelectHints{ - {Start: -45000, End: 80000, Func: "count_over_time", Step: 1000}, - }, - }, { - query: "sum by (dim1) (foo)", start: 10000, - expected: []*storage.SelectHints{ - {Start: 5000, End: 10000, Func: "sum", By: true, Grouping: []string{"dim1"}}, - }, - }, { - query: "sum without (dim1) (foo)", start: 10000, - expected: []*storage.SelectHints{ - {Start: 5000, End: 10000, Func: "sum", Grouping: []string{"dim1"}}, - }, - }, { - query: "sum by (dim1) (avg_over_time(foo[1s]))", start: 10000, - expected: []*storage.SelectHints{ - {Start: 9000, End: 10000, Func: "avg_over_time", Range: 1000}, - }, - }, { - query: "sum by (dim1) (max by (dim2) (foo))", start: 10000, - expected: []*storage.SelectHints{ - {Start: 5000, End: 10000, Func: "max", By: true, Grouping: []string{"dim2"}}, - }, - }, { - query: "(max by (dim1) (foo))[5s:1s]", start: 10000, - expected: []*storage.SelectHints{ - {Start: 0, End: 10000, Func: "max", By: true, Grouping: []string{"dim1"}}, - }, - }, { - query: "(sum(http_requests{group=~\"p.*\"})+max(http_requests{group=~\"c.*\"}))[20s:5s]", start: 120000, - expected: []*storage.SelectHints{ - {Start: 95000, End: 120000, Func: "sum", By: true}, - {Start: 95000, End: 120000, Func: "max", By: true}, - }, - }, { - query: "foo @ 50 + bar @ 250 + baz @ 900", start: 100000, end: 500000, - expected: []*storage.SelectHints{ - {Start: 45000, End: 50000, Step: 1000}, - {Start: 245000, End: 250000, Step: 1000}, - {Start: 895000, End: 900000, Step: 1000}, - }, - }, { - query: "foo @ 50 + bar + baz @ 900", start: 100000, end: 500000, - expected: []*storage.SelectHints{ - {Start: 45000, End: 50000, Step: 1000}, - {Start: 95000, End: 500000, Step: 1000}, - {Start: 895000, End: 900000, Step: 1000}, - }, - }, { - query: "rate(foo[2s] @ 50) + bar @ 250 + baz @ 900", start: 100000, end: 500000, - expected: []*storage.SelectHints{ - {Start: 48000, End: 50000, Step: 1000, Func: "rate", Range: 2000}, - {Start: 245000, End: 250000, Step: 1000}, - {Start: 895000, End: 900000, Step: 1000}, - }, - }, { - query: "rate(foo[2s:1s] @ 50) + bar + baz", start: 100000, end: 500000, - expected: []*storage.SelectHints{ - {Start: 43000, End: 50000, Step: 1000, Func: "rate"}, - {Start: 95000, End: 500000, Step: 1000}, - {Start: 95000, End: 500000, Step: 1000}, - }, - }, { - query: "rate(foo[2s:1s] @ 50) + bar + rate(baz[2m:1s] @ 900 offset 2m) ", start: 100000, end: 500000, - expected: []*storage.SelectHints{ - {Start: 43000, End: 50000, Step: 1000, Func: "rate"}, - {Start: 95000, End: 500000, Step: 1000}, - {Start: 655000, End: 780000, Step: 1000, Func: "rate"}, - }, - }, { // Hints are based on the inner most subquery timestamp. - query: `sum_over_time(sum_over_time(metric{job="1"}[100s])[100s:25s] @ 50)[3s:1s] @ 3000`, start: 100000, - expected: []*storage.SelectHints{ - {Start: -150000, End: 50000, Range: 100000, Func: "sum_over_time"}, - }, - }, { // Hints are based on the inner most subquery timestamp. 
- query: `sum_over_time(sum_over_time(metric{job="1"}[100s])[100s:25s] @ 3000)[3s:1s] @ 50`, - expected: []*storage.SelectHints{ - {Start: 2800000, End: 3000000, Range: 100000, Func: "sum_over_time"}, - }, - }, } { t.Run(tc.query, func(t *testing.T) { engine := NewEngine(opts) @@ -559,7 +562,6 @@ func TestSelectHintsSetCorrectly(t *testing.T) { require.Equal(t, tc.expected, hintsRecorder.hints) }) - } } @@ -645,25 +647,31 @@ load 10s { Query: "metric", Result: Vector{ - Sample{Point: Point{V: 1, T: 1000}, - Metric: labels.FromStrings("__name__", "metric")}, + Sample{ + Point: Point{V: 1, T: 1000}, + Metric: labels.FromStrings("__name__", "metric"), + }, }, Start: time.Unix(1, 0), }, { Query: "metric[20s]", - Result: Matrix{Series{ - Points: []Point{{V: 1, T: 0}, {V: 2, T: 10000}}, - Metric: labels.FromStrings("__name__", "metric")}, + Result: Matrix{ + Series{ + Points: []Point{{V: 1, T: 0}, {V: 2, T: 10000}}, + Metric: labels.FromStrings("__name__", "metric"), + }, }, Start: time.Unix(10, 0), }, // Range queries. { Query: "1", - Result: Matrix{Series{ - Points: []Point{{V: 1, T: 0}, {V: 1, T: 1000}, {V: 1, T: 2000}}, - Metric: labels.FromStrings()}, + Result: Matrix{ + Series{ + Points: []Point{{V: 1, T: 0}, {V: 1, T: 1000}, {V: 1, T: 2000}}, + Metric: labels.FromStrings(), + }, }, Start: time.Unix(0, 0), End: time.Unix(2, 0), @@ -671,9 +679,11 @@ load 10s }, { Query: "metric", - Result: Matrix{Series{ - Points: []Point{{V: 1, T: 0}, {V: 1, T: 1000}, {V: 1, T: 2000}}, - Metric: labels.FromStrings("__name__", "metric")}, + Result: Matrix{ + Series{ + Points: []Point{{V: 1, T: 0}, {V: 1, T: 1000}, {V: 1, T: 2000}}, + Metric: labels.FromStrings("__name__", "metric"), + }, }, Start: time.Unix(0, 0), End: time.Unix(2, 0), @@ -681,9 +691,11 @@ load 10s }, { Query: "metric", - Result: Matrix{Series{ - Points: []Point{{V: 1, T: 0}, {V: 1, T: 5000}, {V: 2, T: 10000}}, - Metric: labels.FromStrings("__name__", "metric")}, + Result: Matrix{ + Series{ + Points: []Point{{V: 1, T: 0}, {V: 1, T: 5000}, {V: 2, T: 10000}}, + Metric: labels.FromStrings("__name__", "metric"), + }, }, Start: time.Unix(0, 0), End: time.Unix(10, 0), @@ -743,23 +755,28 @@ load 10s Query: "1", MaxSamples: 1, Start: time.Unix(1, 0), - }, { + }, + { Query: "metric", MaxSamples: 1, Start: time.Unix(1, 0), - }, { + }, + { Query: "metric[20s]", MaxSamples: 2, Start: time.Unix(10, 0), - }, { + }, + { Query: "rate(metric[20s])", MaxSamples: 3, Start: time.Unix(10, 0), - }, { + }, + { Query: "metric[20s:5s]", MaxSamples: 3, Start: time.Unix(10, 0), - }, { + }, + { Query: "metric[20s] @ 10", MaxSamples: 2, Start: time.Unix(0, 0), @@ -771,38 +788,44 @@ load 10s Start: time.Unix(0, 0), End: time.Unix(2, 0), Interval: time.Second, - }, { + }, + { Query: "1", MaxSamples: 3, Start: time.Unix(0, 0), End: time.Unix(2, 0), Interval: time.Second, - }, { + }, + { Query: "metric", MaxSamples: 3, Start: time.Unix(0, 0), End: time.Unix(2, 0), Interval: time.Second, - }, { + }, + { Query: "metric", MaxSamples: 3, Start: time.Unix(0, 0), End: time.Unix(10, 0), Interval: 5 * time.Second, - }, { + }, + { Query: "rate(bigmetric[1s])", MaxSamples: 1, Start: time.Unix(0, 0), End: time.Unix(10, 0), Interval: 5 * time.Second, - }, { + }, + { // Result is duplicated, so @ also produces 3 samples. 
Query: "metric @ 10", MaxSamples: 3, Start: time.Unix(0, 0), End: time.Unix(10, 0), Interval: 5 * time.Second, - }, { + }, + { // The peak samples in memory is during the first evaluation: // - Subquery takes 22 samples, 11 for each bigmetric, // - Result is calculated per series where the series samples is buffered, hence 11 more here. @@ -814,7 +837,8 @@ load 10s Start: time.Unix(0, 0), End: time.Unix(10, 0), Interval: 5 * time.Second, - }, { + }, + { // Here the reasoning is same as above. But LHS and RHS are done one after another. // So while one of them takes 35 samples at peak, we need to hold the 2 sample // result of the other till then. @@ -823,7 +847,8 @@ load 10s Start: time.Unix(0, 0), End: time.Unix(10, 0), Interval: 5 * time.Second, - }, { + }, + { // Sample as above but with only 1 part as step invariant. // Here the peak is caused by the non-step invariant part as it touches more time range. // Hence at peak it is 2*21 (subquery from 0s to 20s) @@ -834,14 +859,16 @@ load 10s Start: time.Unix(10, 0), End: time.Unix(20, 0), Interval: 5 * time.Second, - }, { + }, + { // Nested subquery. // We saw that innermost rate takes 35 samples which is still the peak // since the other two subqueries just duplicate the result. Query: `rate(rate(bigmetric[10s:1s] @ 10)[100s:25s] @ 1000)[100s:20s] @ 2000`, MaxSamples: 35, Start: time.Unix(10, 0), - }, { + }, + { // Nested subquery. // Now the outmost subquery produces more samples than inner most rate. Query: `rate(rate(bigmetric[10s:1s] @ 10)[100s:25s] @ 1000)[17s:1s] @ 2000`, @@ -1177,9 +1204,11 @@ func TestSubquerySelector(t *testing.T) { Query: "metric[20s:10s]", Result: Result{ nil, - Matrix{Series{ - Points: []Point{{V: 1, T: 0}, {V: 2, T: 10000}}, - Metric: labels.FromStrings("__name__", "metric")}, + Matrix{ + Series{ + Points: []Point{{V: 1, T: 0}, {V: 2, T: 10000}}, + Metric: labels.FromStrings("__name__", "metric"), + }, }, nil, }, @@ -1189,9 +1218,11 @@ func TestSubquerySelector(t *testing.T) { Query: "metric[20s:5s]", Result: Result{ nil, - Matrix{Series{ - Points: []Point{{V: 1, T: 0}, {V: 1, T: 5000}, {V: 2, T: 10000}}, - Metric: labels.FromStrings("__name__", "metric")}, + Matrix{ + Series{ + Points: []Point{{V: 1, T: 0}, {V: 1, T: 5000}, {V: 2, T: 10000}}, + Metric: labels.FromStrings("__name__", "metric"), + }, }, nil, }, @@ -1201,9 +1232,11 @@ func TestSubquerySelector(t *testing.T) { Query: "metric[20s:5s] offset 2s", Result: Result{ nil, - Matrix{Series{ - Points: []Point{{V: 1, T: 0}, {V: 1, T: 5000}, {V: 2, T: 10000}}, - Metric: labels.FromStrings("__name__", "metric")}, + Matrix{ + Series{ + Points: []Point{{V: 1, T: 0}, {V: 1, T: 5000}, {V: 2, T: 10000}}, + Metric: labels.FromStrings("__name__", "metric"), + }, }, nil, }, @@ -1213,9 +1246,11 @@ func TestSubquerySelector(t *testing.T) { Query: "metric[20s:5s] offset 6s", Result: Result{ nil, - Matrix{Series{ - Points: []Point{{V: 1, T: 0}, {V: 1, T: 5000}, {V: 2, T: 10000}}, - Metric: labels.FromStrings("__name__", "metric")}, + Matrix{ + Series{ + Points: []Point{{V: 1, T: 0}, {V: 1, T: 5000}, {V: 2, T: 10000}}, + Metric: labels.FromStrings("__name__", "metric"), + }, }, nil, }, @@ -1225,9 +1260,11 @@ func TestSubquerySelector(t *testing.T) { Query: "metric[20s:5s] offset 4s", Result: Result{ nil, - Matrix{Series{ - Points: []Point{{V: 2, T: 15000}, {V: 2, T: 20000}, {V: 2, T: 25000}, {V: 2, T: 30000}}, - Metric: labels.FromStrings("__name__", "metric")}, + Matrix{ + Series{ + Points: []Point{{V: 2, T: 15000}, {V: 2, T: 20000}, {V: 2, T: 25000}, {V: 2, T: 30000}}, 
+ Metric: labels.FromStrings("__name__", "metric"), + }, }, nil, }, @@ -1237,9 +1274,11 @@ func TestSubquerySelector(t *testing.T) { Query: "metric[20s:5s] offset 5s", Result: Result{ nil, - Matrix{Series{ - Points: []Point{{V: 2, T: 10000}, {V: 2, T: 15000}, {V: 2, T: 20000}, {V: 2, T: 25000}, {V: 2, T: 30000}}, - Metric: labels.FromStrings("__name__", "metric")}, + Matrix{ + Series{ + Points: []Point{{V: 2, T: 10000}, {V: 2, T: 15000}, {V: 2, T: 20000}, {V: 2, T: 25000}, {V: 2, T: 30000}}, + Metric: labels.FromStrings("__name__", "metric"), + }, }, nil, }, @@ -1249,9 +1288,11 @@ func TestSubquerySelector(t *testing.T) { Query: "metric[20s:5s] offset 6s", Result: Result{ nil, - Matrix{Series{ - Points: []Point{{V: 2, T: 10000}, {V: 2, T: 15000}, {V: 2, T: 20000}, {V: 2, T: 25000}}, - Metric: labels.FromStrings("__name__", "metric")}, + Matrix{ + Series{ + Points: []Point{{V: 2, T: 10000}, {V: 2, T: 15000}, {V: 2, T: 20000}, {V: 2, T: 25000}}, + Metric: labels.FromStrings("__name__", "metric"), + }, }, nil, }, @@ -1261,9 +1302,11 @@ func TestSubquerySelector(t *testing.T) { Query: "metric[20s:5s] offset 7s", Result: Result{ nil, - Matrix{Series{ - Points: []Point{{V: 2, T: 10000}, {V: 2, T: 15000}, {V: 2, T: 20000}, {V: 2, T: 25000}}, - Metric: labels.FromStrings("__name__", "metric")}, + Matrix{ + Series{ + Points: []Point{{V: 2, T: 10000}, {V: 2, T: 15000}, {V: 2, T: 20000}, {V: 2, T: 25000}}, + Metric: labels.FromStrings("__name__", "metric"), + }, }, nil, }, @@ -1282,9 +1325,11 @@ func TestSubquerySelector(t *testing.T) { Query: `http_requests{group=~"pro.*",instance="0"}[30s:10s]`, Result: Result{ nil, - Matrix{Series{ - Points: []Point{{V: 9990, T: 9990000}, {V: 10000, T: 10000000}, {V: 100, T: 10010000}, {V: 130, T: 10020000}}, - Metric: labels.FromStrings("__name__", "http_requests", "job", "api-server", "instance", "0", "group", "production")}, + Matrix{ + Series{ + Points: []Point{{V: 9990, T: 9990000}, {V: 10000, T: 10000000}, {V: 100, T: 10010000}, {V: 130, T: 10020000}}, + Metric: labels.FromStrings("__name__", "http_requests", "job", "api-server", "instance", "0", "group", "production"), + }, }, nil, }, @@ -1294,9 +1339,11 @@ func TestSubquerySelector(t *testing.T) { Query: `http_requests{group=~"pro.*",instance="0"}[5m:]`, Result: Result{ nil, - Matrix{Series{ - Points: []Point{{V: 9840, T: 9840000}, {V: 9900, T: 9900000}, {V: 9960, T: 9960000}, {V: 130, T: 10020000}, {V: 310, T: 10080000}}, - Metric: labels.FromStrings("__name__", "http_requests", "job", "api-server", "instance", "0", "group", "production")}, + Matrix{ + Series{ + Points: []Point{{V: 9840, T: 9840000}, {V: 9900, T: 9900000}, {V: 9960, T: 9960000}, {V: 130, T: 10020000}, {V: 310, T: 10080000}}, + Metric: labels.FromStrings("__name__", "http_requests", "job", "api-server", "instance", "0", "group", "production"), + }, }, nil, }, @@ -1306,9 +1353,11 @@ func TestSubquerySelector(t *testing.T) { Query: `http_requests{group=~"pro.*",instance="0"}[5m:] offset 20m`, Result: Result{ nil, - Matrix{Series{ - Points: []Point{{V: 8640, T: 8640000}, {V: 8700, T: 8700000}, {V: 8760, T: 8760000}, {V: 8820, T: 8820000}, {V: 8880, T: 8880000}}, - Metric: labels.FromStrings("__name__", "http_requests", "job", "api-server", "instance", "0", "group", "production")}, + Matrix{ + Series{ + Points: []Point{{V: 8640, T: 8640000}, {V: 8700, T: 8700000}, {V: 8760, T: 8760000}, {V: 8820, T: 8820000}, {V: 8880, T: 8880000}}, + Metric: labels.FromStrings("__name__", "http_requests", "job", "api-server", "instance", "0", "group", 
"production"), + }, }, nil, }, @@ -1344,9 +1393,11 @@ func TestSubquerySelector(t *testing.T) { Query: `sum(http_requests{group=~"pro.*"})[30s:10s]`, Result: Result{ nil, - Matrix{Series{ - Points: []Point{{V: 270, T: 90000}, {V: 300, T: 100000}, {V: 330, T: 110000}, {V: 360, T: 120000}}, - Metric: labels.Labels{}}, + Matrix{ + Series{ + Points: []Point{{V: 270, T: 90000}, {V: 300, T: 100000}, {V: 330, T: 110000}, {V: 360, T: 120000}}, + Metric: labels.Labels{}, + }, }, nil, }, @@ -1356,9 +1407,11 @@ func TestSubquerySelector(t *testing.T) { Query: `sum(http_requests)[40s:10s]`, Result: Result{ nil, - Matrix{Series{ - Points: []Point{{V: 800, T: 80000}, {V: 900, T: 90000}, {V: 1000, T: 100000}, {V: 1100, T: 110000}, {V: 1200, T: 120000}}, - Metric: labels.Labels{}}, + Matrix{ + Series{ + Points: []Point{{V: 800, T: 80000}, {V: 900, T: 90000}, {V: 1000, T: 100000}, {V: 1100, T: 110000}, {V: 1200, T: 120000}}, + Metric: labels.Labels{}, + }, }, nil, }, @@ -1368,9 +1421,11 @@ func TestSubquerySelector(t *testing.T) { Query: `(sum(http_requests{group=~"p.*"})+sum(http_requests{group=~"c.*"}))[20s:5s]`, Result: Result{ nil, - Matrix{Series{ - Points: []Point{{V: 1000, T: 100000}, {V: 1000, T: 105000}, {V: 1100, T: 110000}, {V: 1100, T: 115000}, {V: 1200, T: 120000}}, - Metric: labels.Labels{}}, + Matrix{ + Series{ + Points: []Point{{V: 1000, T: 100000}, {V: 1000, T: 105000}, {V: 1100, T: 110000}, {V: 1100, T: 115000}, {V: 1200, T: 120000}}, + Metric: labels.Labels{}, + }, }, nil, }, @@ -1535,7 +1590,7 @@ func TestQueryLogger_error(t *testing.T) { func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) { startTime := time.Unix(1000, 0) endTime := time.Unix(9999, 0) - var testCases = []struct { + testCases := []struct { input string // The input to be parsed. expected parser.Expr // The expected expression AST. 
}{ @@ -2337,9 +2392,11 @@ func TestRangeQuery(t *testing.T) { Load: `load 30s bar 0 1 10 100 1000`, Query: "sum_over_time(bar[30s])", - Result: Matrix{Series{ - Points: []Point{{V: 0, T: 0}, {V: 11, T: 60000}, {V: 1100, T: 120000}}, - Metric: labels.Labels{}}, + Result: Matrix{ + Series{ + Points: []Point{{V: 0, T: 0}, {V: 11, T: 60000}, {V: 1100, T: 120000}}, + Metric: labels.Labels{}, + }, }, Start: time.Unix(0, 0), End: time.Unix(120, 0), @@ -2350,9 +2407,11 @@ func TestRangeQuery(t *testing.T) { Load: `load 30s bar 0 1 10 100 1000 0 0 0 0`, Query: "sum_over_time(bar[30s])", - Result: Matrix{Series{ - Points: []Point{{V: 0, T: 0}, {V: 11, T: 60000}, {V: 1100, T: 120000}}, - Metric: labels.Labels{}}, + Result: Matrix{ + Series{ + Points: []Point{{V: 0, T: 0}, {V: 11, T: 60000}, {V: 1100, T: 120000}}, + Metric: labels.Labels{}, + }, }, Start: time.Unix(0, 0), End: time.Unix(120, 0), @@ -2363,9 +2422,11 @@ func TestRangeQuery(t *testing.T) { Load: `load 30s bar 0 1 10 100 1000 10000 100000 1000000 10000000`, Query: "sum_over_time(bar[30s])", - Result: Matrix{Series{ - Points: []Point{{V: 0, T: 0}, {V: 11, T: 60000}, {V: 1100, T: 120000}, {V: 110000, T: 180000}, {V: 11000000, T: 240000}}, - Metric: labels.Labels{}}, + Result: Matrix{ + Series{ + Points: []Point{{V: 0, T: 0}, {V: 11, T: 60000}, {V: 1100, T: 120000}, {V: 110000, T: 180000}, {V: 11000000, T: 240000}}, + Metric: labels.Labels{}, + }, }, Start: time.Unix(0, 0), End: time.Unix(240, 0), @@ -2376,9 +2437,11 @@ func TestRangeQuery(t *testing.T) { Load: `load 30s bar 5 17 42 2 7 905 51`, Query: "sum_over_time(bar[30s])", - Result: Matrix{Series{ - Points: []Point{{V: 5, T: 0}, {V: 59, T: 60000}, {V: 9, T: 120000}, {V: 956, T: 180000}}, - Metric: labels.Labels{}}, + Result: Matrix{ + Series{ + Points: []Point{{V: 5, T: 0}, {V: 59, T: 60000}, {V: 9, T: 120000}, {V: 956, T: 180000}}, + Metric: labels.Labels{}, + }, }, Start: time.Unix(0, 0), End: time.Unix(180, 0), @@ -2389,9 +2452,11 @@ func TestRangeQuery(t *testing.T) { Load: `load 30s metric 1+1x4`, Query: "metric", - Result: Matrix{Series{ - Points: []Point{{V: 1, T: 0}, {V: 3, T: 60000}, {V: 5, T: 120000}}, - Metric: labels.Labels{labels.Label{Name: "__name__", Value: "metric"}}}, + Result: Matrix{ + Series{ + Points: []Point{{V: 1, T: 0}, {V: 3, T: 60000}, {V: 5, T: 120000}}, + Metric: labels.Labels{labels.Label{Name: "__name__", Value: "metric"}}, + }, }, Start: time.Unix(0, 0), End: time.Unix(120, 0), @@ -2402,9 +2467,11 @@ func TestRangeQuery(t *testing.T) { Load: `load 30s metric 1+1x8`, Query: "metric", - Result: Matrix{Series{ - Points: []Point{{V: 1, T: 0}, {V: 3, T: 60000}, {V: 5, T: 120000}}, - Metric: labels.Labels{labels.Label{Name: "__name__", Value: "metric"}}}, + Result: Matrix{ + Series{ + Points: []Point{{V: 1, T: 0}, {V: 3, T: 60000}, {V: 5, T: 120000}}, + Metric: labels.Labels{labels.Label{Name: "__name__", Value: "metric"}}, + }, }, Start: time.Unix(0, 0), End: time.Unix(120, 0), diff --git a/promql/functions.go b/promql/functions.go index 49ff09678..19bcc6449 100644 --- a/promql/functions.go +++ b/promql/functions.go @@ -56,7 +56,7 @@ func funcTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) // It calculates the rate (allowing for counter resets if isCounter is true), // extrapolates if the first/last sample is close to the boundary, and returns // the result as either per-second (if isRate is true) or overall. 
-func extrapolatedRate(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper, isCounter bool, isRate bool) Vector { +func extrapolatedRate(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper, isCounter, isRate bool) Vector { ms := args[0].(*parser.MatrixSelector) vs := ms.VectorSelector.(*parser.VectorSelector) var ( diff --git a/promql/parser/ast.go b/promql/parser/ast.go index 776242564..e9fcdae08 100644 --- a/promql/parser/ast.go +++ b/promql/parser/ast.go @@ -407,7 +407,7 @@ type PositionRange struct { // mergeRanges is a helper function to merge the PositionRanges of two Nodes. // Note that the arguments must be in the same order as they // occur in the input string. -func mergeRanges(first Node, last Node) PositionRange { +func mergeRanges(first, last Node) PositionRange { return PositionRange{ Start: first.PositionRange().Start, End: last.PositionRange().End, @@ -426,15 +426,19 @@ func (i *Item) PositionRange() PositionRange { func (e *AggregateExpr) PositionRange() PositionRange { return e.PosRange } + func (e *BinaryExpr) PositionRange() PositionRange { return mergeRanges(e.LHS, e.RHS) } + func (e *Call) PositionRange() PositionRange { return e.PosRange } + func (e *EvalStmt) PositionRange() PositionRange { return e.Expr.PositionRange() } + func (e Expressions) PositionRange() PositionRange { if len(e) == 0 { // Position undefined. @@ -445,33 +449,40 @@ func (e Expressions) PositionRange() PositionRange { } return mergeRanges(e[0], e[len(e)-1]) } + func (e *MatrixSelector) PositionRange() PositionRange { return PositionRange{ Start: e.VectorSelector.PositionRange().Start, End: e.EndPos, } } + func (e *SubqueryExpr) PositionRange() PositionRange { return PositionRange{ Start: e.Expr.PositionRange().Start, End: e.EndPos, } } + func (e *NumberLiteral) PositionRange() PositionRange { return e.PosRange } + func (e *ParenExpr) PositionRange() PositionRange { return e.PosRange } + func (e *StringLiteral) PositionRange() PositionRange { return e.PosRange } + func (e *UnaryExpr) PositionRange() PositionRange { return PositionRange{ Start: e.StartPos, End: e.Expr.PositionRange().End, } } + func (e *VectorSelector) PositionRange() PositionRange { return e.PosRange } diff --git a/promql/parser/generated_parser.y.go b/promql/parser/generated_parser.y.go index 71614913a..71bce62bc 100644 --- a/promql/parser/generated_parser.y.go +++ b/promql/parser/generated_parser.y.go @@ -33,82 +33,84 @@ type yySymType struct { duration time.Duration } -const EQL = 57346 -const BLANK = 57347 -const COLON = 57348 -const COMMA = 57349 -const COMMENT = 57350 -const DURATION = 57351 -const EOF = 57352 -const ERROR = 57353 -const IDENTIFIER = 57354 -const LEFT_BRACE = 57355 -const LEFT_BRACKET = 57356 -const LEFT_PAREN = 57357 -const METRIC_IDENTIFIER = 57358 -const NUMBER = 57359 -const RIGHT_BRACE = 57360 -const RIGHT_BRACKET = 57361 -const RIGHT_PAREN = 57362 -const SEMICOLON = 57363 -const SPACE = 57364 -const STRING = 57365 -const TIMES = 57366 -const operatorsStart = 57367 -const ADD = 57368 -const DIV = 57369 -const EQLC = 57370 -const EQL_REGEX = 57371 -const GTE = 57372 -const GTR = 57373 -const LAND = 57374 -const LOR = 57375 -const LSS = 57376 -const LTE = 57377 -const LUNLESS = 57378 -const MOD = 57379 -const MUL = 57380 -const NEQ = 57381 -const NEQ_REGEX = 57382 -const POW = 57383 -const SUB = 57384 -const AT = 57385 -const ATAN2 = 57386 -const operatorsEnd = 57387 -const aggregatorsStart = 57388 -const AVG = 57389 -const BOTTOMK = 57390 -const COUNT = 57391 -const 
COUNT_VALUES = 57392 -const GROUP = 57393 -const MAX = 57394 -const MIN = 57395 -const QUANTILE = 57396 -const STDDEV = 57397 -const STDVAR = 57398 -const SUM = 57399 -const TOPK = 57400 -const aggregatorsEnd = 57401 -const keywordsStart = 57402 -const BOOL = 57403 -const BY = 57404 -const GROUP_LEFT = 57405 -const GROUP_RIGHT = 57406 -const IGNORING = 57407 -const OFFSET = 57408 -const ON = 57409 -const WITHOUT = 57410 -const keywordsEnd = 57411 -const preprocessorStart = 57412 -const START = 57413 -const END = 57414 -const preprocessorEnd = 57415 -const startSymbolsStart = 57416 -const START_METRIC = 57417 -const START_SERIES_DESCRIPTION = 57418 -const START_EXPRESSION = 57419 -const START_METRIC_SELECTOR = 57420 -const startSymbolsEnd = 57421 +const ( + EQL = 57346 + BLANK = 57347 + COLON = 57348 + COMMA = 57349 + COMMENT = 57350 + DURATION = 57351 + EOF = 57352 + ERROR = 57353 + IDENTIFIER = 57354 + LEFT_BRACE = 57355 + LEFT_BRACKET = 57356 + LEFT_PAREN = 57357 + METRIC_IDENTIFIER = 57358 + NUMBER = 57359 + RIGHT_BRACE = 57360 + RIGHT_BRACKET = 57361 + RIGHT_PAREN = 57362 + SEMICOLON = 57363 + SPACE = 57364 + STRING = 57365 + TIMES = 57366 + operatorsStart = 57367 + ADD = 57368 + DIV = 57369 + EQLC = 57370 + EQL_REGEX = 57371 + GTE = 57372 + GTR = 57373 + LAND = 57374 + LOR = 57375 + LSS = 57376 + LTE = 57377 + LUNLESS = 57378 + MOD = 57379 + MUL = 57380 + NEQ = 57381 + NEQ_REGEX = 57382 + POW = 57383 + SUB = 57384 + AT = 57385 + ATAN2 = 57386 + operatorsEnd = 57387 + aggregatorsStart = 57388 + AVG = 57389 + BOTTOMK = 57390 + COUNT = 57391 + COUNT_VALUES = 57392 + GROUP = 57393 + MAX = 57394 + MIN = 57395 + QUANTILE = 57396 + STDDEV = 57397 + STDVAR = 57398 + SUM = 57399 + TOPK = 57400 + aggregatorsEnd = 57401 + keywordsStart = 57402 + BOOL = 57403 + BY = 57404 + GROUP_LEFT = 57405 + GROUP_RIGHT = 57406 + IGNORING = 57407 + OFFSET = 57408 + ON = 57409 + WITHOUT = 57410 + keywordsEnd = 57411 + preprocessorStart = 57412 + START = 57413 + END = 57414 + preprocessorEnd = 57415 + startSymbolsStart = 57416 + START_METRIC = 57417 + START_SERIES_DESCRIPTION = 57418 + START_EXPRESSION = 57419 + START_METRIC_SELECTOR = 57420 + startSymbolsEnd = 57421 +) var yyToknames = [...]string{ "$end", @@ -194,9 +196,11 @@ var yyToknames = [...]string{ var yyStatenames = [...]string{} -const yyEofCode = 1 -const yyErrCode = 2 -const yyInitialStackSize = 16 +const ( + yyEofCode = 1 + yyErrCode = 2 + yyInitialStackSize = 16 +) //line promql/parser/generated_parser.y:749 diff --git a/promql/parser/lex_test.go b/promql/parser/lex_test.go index 95f4d978d..8e22f41d9 100644 --- a/promql/parser/lex_test.go +++ b/promql/parser/lex_test.go @@ -318,25 +318,32 @@ var tests = []struct { { input: "offset", expected: []Item{{OFFSET, 0, "offset"}}, - }, { + }, + { input: "by", expected: []Item{{BY, 0, "by"}}, - }, { + }, + { input: "without", expected: []Item{{WITHOUT, 0, "without"}}, - }, { + }, + { input: "on", expected: []Item{{ON, 0, "on"}}, - }, { + }, + { input: "ignoring", expected: []Item{{IGNORING, 0, "ignoring"}}, - }, { + }, + { input: "group_left", expected: []Item{{GROUP_LEFT, 0, "group_left"}}, - }, { + }, + { input: "group_right", expected: []Item{{GROUP_RIGHT, 0, "group_right"}}, - }, { + }, + { input: "bool", expected: []Item{{BOOL, 0, "bool"}}, }, @@ -569,7 +576,8 @@ var tests = []struct { {DURATION, 24, `4s`}, {RIGHT_BRACKET, 26, `]`}, }, - }, { + }, + { input: `test:name{on!~"b:ar"}[4m:4s]`, expected: []Item{ {METRIC_IDENTIFIER, 0, `test:name`}, @@ -584,7 +592,8 @@ var tests = []struct { {DURATION, 25, 
`4s`}, {RIGHT_BRACKET, 27, `]`}, }, - }, { + }, + { input: `test:name{on!~"b:ar"}[4m:]`, expected: []Item{ {METRIC_IDENTIFIER, 0, `test:name`}, @@ -598,7 +607,8 @@ var tests = []struct { {COLON, 24, `:`}, {RIGHT_BRACKET, 25, `]`}, }, - }, { // Nested Subquery. + }, + { // Nested Subquery. input: `min_over_time(rate(foo{bar="baz"}[2s])[5m:])[4m:3s]`, expected: []Item{ @@ -646,7 +656,8 @@ var tests = []struct { {OFFSET, 29, "offset"}, {DURATION, 36, "10m"}, }, - }, { + }, + { input: `min_over_time(rate(foo{bar="baz"}[2s])[5m:] offset 6m)[4m:3s]`, expected: []Item{ @@ -737,7 +748,6 @@ func TestLexer(t *testing.T) { if item.Typ == ERROR { hasError = true } - } if !hasError { t.Logf("%d: input %q", i, test.input) diff --git a/promql/parser/parse.go b/promql/parser/parse.go index edecfc0e9..2c44c3c2c 100644 --- a/promql/parser/parse.go +++ b/promql/parser/parse.go @@ -241,7 +241,7 @@ func (p *parser) addParseErr(positionRange PositionRange, err error) { // unexpected creates a parser error complaining about an unexpected lexer item. // The item that is presented as unexpected is always the last item produced // by the lexer. -func (p *parser) unexpected(context string, expected string) { +func (p *parser) unexpected(context, expected string) { var errMsg strings.Builder // Do not report lexer errors twice @@ -354,7 +354,8 @@ func (p *parser) InjectItem(typ ItemType) { p.inject = typ p.injecting = true } -func (p *parser) newBinaryExpression(lhs Node, op Item, modifiers Node, rhs Node) *BinaryExpr { + +func (p *parser) newBinaryExpression(lhs Node, op Item, modifiers, rhs Node) *BinaryExpr { ret := modifiers.(*BinaryExpr) ret.LHS = lhs.(Expr) @@ -374,7 +375,7 @@ func (p *parser) assembleVectorSelector(vs *VectorSelector) { } } -func (p *parser) newAggregateExpr(op Item, modifier Node, args Node) (ret *AggregateExpr) { +func (p *parser) newAggregateExpr(op Item, modifier, args Node) (ret *AggregateExpr) { ret = modifier.(*AggregateExpr) arguments := args.(Expressions) @@ -650,10 +651,9 @@ func (p *parser) parseGenerated(startSymbol ItemType) interface{} { p.yyParser.Parse(p) return p.generatedParserResult - } -func (p *parser) newLabelMatcher(label Item, operator Item, value Item) *labels.Matcher { +func (p *parser) newLabelMatcher(label, operator, value Item) *labels.Matcher { op := operator.Typ val := p.unquoteString(value.Val) diff --git a/promql/parser/parse_test.go b/promql/parser/parse_test.go index b73dfd22a..99cc924e0 100644 --- a/promql/parser/parse_test.go +++ b/promql/parser/parse_test.go @@ -40,73 +40,85 @@ var testExpr = []struct { Val: 1, PosRange: PositionRange{Start: 0, End: 1}, }, - }, { + }, + { input: "+Inf", expected: &NumberLiteral{ Val: math.Inf(1), PosRange: PositionRange{Start: 0, End: 4}, }, - }, { + }, + { input: "-Inf", expected: &NumberLiteral{ Val: math.Inf(-1), PosRange: PositionRange{Start: 0, End: 4}, }, - }, { + }, + { input: ".5", expected: &NumberLiteral{ Val: 0.5, PosRange: PositionRange{Start: 0, End: 2}, }, - }, { + }, + { input: "5.", expected: &NumberLiteral{ Val: 5, PosRange: PositionRange{Start: 0, End: 2}, }, - }, { + }, + { input: "123.4567", expected: &NumberLiteral{ Val: 123.4567, PosRange: PositionRange{Start: 0, End: 8}, }, - }, { + }, + { input: "5e-3", expected: &NumberLiteral{ Val: 0.005, PosRange: PositionRange{Start: 0, End: 4}, }, - }, { + }, + { input: "5e3", expected: &NumberLiteral{ Val: 5000, PosRange: PositionRange{Start: 0, End: 3}, }, - }, { + }, + { input: "0xc", expected: &NumberLiteral{ Val: 12, PosRange: PositionRange{Start: 0, End: 
3}, }, - }, { + }, + { input: "0755", expected: &NumberLiteral{ Val: 493, PosRange: PositionRange{Start: 0, End: 4}, }, - }, { + }, + { input: "+5.5e-3", expected: &NumberLiteral{ Val: 0.0055, PosRange: PositionRange{Start: 0, End: 7}, }, - }, { + }, + { input: "-0755", expected: &NumberLiteral{ Val: -493, PosRange: PositionRange{Start: 0, End: 5}, }, - }, { + }, + { input: "1 + 1", expected: &BinaryExpr{ Op: ADD, @@ -119,7 +131,8 @@ var testExpr = []struct { PosRange: PositionRange{Start: 4, End: 5}, }, }, - }, { + }, + { input: "1 - 1", expected: &BinaryExpr{ Op: SUB, @@ -132,7 +145,8 @@ var testExpr = []struct { PosRange: PositionRange{Start: 4, End: 5}, }, }, - }, { + }, + { input: "1 * 1", expected: &BinaryExpr{ Op: MUL, @@ -145,7 +159,8 @@ var testExpr = []struct { PosRange: PositionRange{Start: 4, End: 5}, }, }, - }, { + }, + { input: "1 % 1", expected: &BinaryExpr{ Op: MOD, @@ -158,7 +173,8 @@ var testExpr = []struct { PosRange: PositionRange{Start: 4, End: 5}, }, }, - }, { + }, + { input: "1 / 1", expected: &BinaryExpr{ Op: DIV, @@ -171,7 +187,8 @@ var testExpr = []struct { PosRange: PositionRange{Start: 4, End: 5}, }, }, - }, { + }, + { input: "1 == bool 1", expected: &BinaryExpr{ Op: EQLC, @@ -185,7 +202,8 @@ var testExpr = []struct { }, ReturnBool: true, }, - }, { + }, + { input: "1 != bool 1", expected: &BinaryExpr{ Op: NEQ, @@ -199,7 +217,8 @@ var testExpr = []struct { }, ReturnBool: true, }, - }, { + }, + { input: "1 > bool 1", expected: &BinaryExpr{ Op: GTR, @@ -213,7 +232,8 @@ var testExpr = []struct { }, ReturnBool: true, }, - }, { + }, + { input: "1 >= bool 1", expected: &BinaryExpr{ Op: GTE, @@ -227,7 +247,8 @@ var testExpr = []struct { }, ReturnBool: true, }, - }, { + }, + { input: "1 < bool 1", expected: &BinaryExpr{ Op: LSS, @@ -241,7 +262,8 @@ var testExpr = []struct { }, ReturnBool: true, }, - }, { + }, + { input: "1 <= bool 1", expected: &BinaryExpr{ Op: LTE, @@ -255,7 +277,8 @@ var testExpr = []struct { }, ReturnBool: true, }, - }, { + }, + { input: "-1^2", expected: &UnaryExpr{ Op: SUB, @@ -271,7 +294,8 @@ var testExpr = []struct { }, }, }, - }, { + }, + { input: "-1*2", expected: &BinaryExpr{ Op: MUL, @@ -284,7 +308,8 @@ var testExpr = []struct { PosRange: PositionRange{Start: 3, End: 4}, }, }, - }, { + }, + { input: "-1+2", expected: &BinaryExpr{ Op: ADD, @@ -297,7 +322,8 @@ var testExpr = []struct { PosRange: PositionRange{Start: 3, End: 4}, }, }, - }, { + }, + { input: "-1^-2", expected: &UnaryExpr{ Op: SUB, @@ -313,7 +339,8 @@ var testExpr = []struct { }, }, }, - }, { + }, + { input: "+1 + -2 * 1", expected: &BinaryExpr{ Op: ADD, @@ -333,7 +360,8 @@ var testExpr = []struct { }, }, }, - }, { + }, + { input: "1 + 2/(3*1)", expected: &BinaryExpr{ Op: ADD, @@ -363,7 +391,8 @@ var testExpr = []struct { }, }, }, - }, { + }, + { input: "1 < bool 2 - 1 * 2", expected: &BinaryExpr{ Op: LSS, @@ -391,7 +420,8 @@ var testExpr = []struct { }, }, }, - }, { + }, + { input: "-some_metric", expected: &UnaryExpr{ Op: SUB, @@ -406,7 +436,8 @@ var testExpr = []struct { }, }, }, - }, { + }, + { input: "+some_metric", expected: &UnaryExpr{ Op: ADD, @@ -421,7 +452,8 @@ var testExpr = []struct { }, }, }, - }, { + }, + { input: " +some_metric", expected: &UnaryExpr{ Op: ADD, @@ -437,103 +469,128 @@ var testExpr = []struct { }, StartPos: 1, }, - }, { + }, + { input: "", fail: true, errMsg: "no expression found in input", - }, { + }, + { input: "# just a comment\n\n", fail: true, errMsg: "no expression found in input", - }, { + }, + { input: "1+", fail: true, errMsg: "unexpected end 
of input", - }, { + }, + { input: ".", fail: true, errMsg: "unexpected character: '.'", - }, { + }, + { input: "2.5.", fail: true, errMsg: "unexpected character: '.'", - }, { + }, + { input: "100..4", fail: true, errMsg: `unexpected number ".4"`, - }, { + }, + { input: "0deadbeef", fail: true, errMsg: "bad number or duration syntax: \"0de\"", - }, { + }, + { input: "1 /", fail: true, errMsg: "unexpected end of input", - }, { + }, + { input: "*1", fail: true, errMsg: "unexpected ", - }, { + }, + { input: "(1))", fail: true, errMsg: "unexpected right parenthesis ')'", - }, { + }, + { input: "((1)", fail: true, errMsg: "unclosed left parenthesis", - }, { + }, + { input: "999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999", fail: true, errMsg: "out of range", - }, { + }, + { input: "(", fail: true, errMsg: "unclosed left parenthesis", - }, { + }, + { input: "1 and 1", fail: true, errMsg: "set operator \"and\" not allowed in binary scalar expression", - }, { + }, + { input: "1 == 1", fail: true, errMsg: "1:3: parse error: comparisons between scalars must use BOOL modifier", - }, { + }, + { input: "1 or 1", fail: true, errMsg: "set operator \"or\" not allowed in binary scalar expression", - }, { + }, + { input: "1 unless 1", fail: true, errMsg: "set operator \"unless\" not allowed in binary scalar expression", - }, { + }, + { input: "1 !~ 1", fail: true, errMsg: `unexpected character after '!': '~'`, - }, { + }, + { input: "1 =~ 1", fail: true, errMsg: `unexpected character after '=': '~'`, - }, { + }, + { input: `-"string"`, fail: true, errMsg: `unary expression only allowed on expressions of type scalar or instant vector, got "string"`, - }, { + }, + { input: `-test[5m]`, fail: true, errMsg: `unary expression only allowed on expressions of type scalar or instant vector, got "range vector"`, - }, { + }, + { input: `*test`, fail: true, errMsg: "unexpected ", - }, { + }, + { input: "1 offset 1d", fail: true, errMsg: "1:1: parse error: offset modifier must be preceded by an instant vector selector or range vector selector or a subquery", - }, { + }, + { input: "foo offset 1s offset 2s", fail: true, errMsg: "offset may not be set multiple times", - }, { + }, + { input: "a - on(b) ignoring(c) d", fail: true, errMsg: "1:11: parse error: unexpected ", @@ -565,7 +622,8 @@ var testExpr = []struct { }, VectorMatching: &VectorMatching{Card: CardOneToOne}, }, - }, { + }, + { input: "foo * sum", expected: &BinaryExpr{ Op: MUL, @@ -591,7 +649,8 @@ var testExpr = []struct { }, VectorMatching: &VectorMatching{Card: CardOneToOne}, }, - }, { + }, + { input: "foo == 1", expected: &BinaryExpr{ Op: EQLC, @@ -610,7 +669,8 @@ var testExpr = []struct { PosRange: PositionRange{Start: 7, End: 8}, }, }, - }, { + }, + { input: "foo == bool 1", expected: &BinaryExpr{ Op: EQLC, @@ -630,7 +690,8 @@ var testExpr = []struct { }, ReturnBool: true, }, - }, { + }, + { input: "2.5 / bar", expected: &BinaryExpr{ Op: DIV, @@ -649,7 +710,8 @@ var testExpr = []struct { }, }, }, - }, { + }, + { input: "foo and bar", expected: &BinaryExpr{ Op: LAND, @@ -675,7 +737,8 @@ var testExpr = []struct { }, VectorMatching: &VectorMatching{Card: CardManyToMany}, }, - }, { + }, + { input: "foo or bar", expected: &BinaryExpr{ Op: LOR, @@ -701,7 +764,8 @@ var testExpr = []struct { }, 
VectorMatching: &VectorMatching{Card: CardManyToMany}, }, - }, { + }, + { input: "foo unless bar", expected: &BinaryExpr{ Op: LUNLESS, @@ -727,7 +791,8 @@ var testExpr = []struct { }, VectorMatching: &VectorMatching{Card: CardManyToMany}, }, - }, { + }, + { // Test and/or precedence and reassigning of operands. input: "foo + bar or bla and blub", expected: &BinaryExpr{ @@ -782,7 +847,8 @@ var testExpr = []struct { }, VectorMatching: &VectorMatching{Card: CardManyToMany}, }, - }, { + }, + { // Test and/or/unless precedence. input: "foo and bar unless baz or qux", expected: &BinaryExpr{ @@ -837,7 +903,8 @@ var testExpr = []struct { }, VectorMatching: &VectorMatching{Card: CardManyToMany}, }, - }, { + }, + { // Test precedence and reassigning of operands. input: "bar + on(foo) bla / on(baz, buz) group_right(test) blub", expected: &BinaryExpr{ @@ -887,7 +954,8 @@ var testExpr = []struct { On: true, }, }, - }, { + }, + { input: "foo * on(test,blub) bar", expected: &BinaryExpr{ Op: MUL, @@ -917,7 +985,8 @@ var testExpr = []struct { On: true, }, }, - }, { + }, + { input: "foo * on(test,blub) group_left bar", expected: &BinaryExpr{ Op: MUL, @@ -947,7 +1016,8 @@ var testExpr = []struct { On: true, }, }, - }, { + }, + { input: "foo and on(test,blub) bar", expected: &BinaryExpr{ Op: LAND, @@ -977,7 +1047,8 @@ var testExpr = []struct { On: true, }, }, - }, { + }, + { input: "foo and on() bar", expected: &BinaryExpr{ Op: LAND, @@ -1007,7 +1078,8 @@ var testExpr = []struct { On: true, }, }, - }, { + }, + { input: "foo and ignoring(test,blub) bar", expected: &BinaryExpr{ Op: LAND, @@ -1036,7 +1108,8 @@ var testExpr = []struct { MatchingLabels: []string{"test", "blub"}, }, }, - }, { + }, + { input: "foo and ignoring() bar", expected: &BinaryExpr{ Op: LAND, @@ -1065,7 +1138,8 @@ var testExpr = []struct { MatchingLabels: []string{}, }, }, - }, { + }, + { input: "foo unless on(bar) baz", expected: &BinaryExpr{ Op: LUNLESS, @@ -1095,7 +1169,8 @@ var testExpr = []struct { On: true, }, }, - }, { + }, + { input: "foo / on(test,blub) group_left(bar) bar", expected: &BinaryExpr{ Op: DIV, @@ -1126,7 +1201,8 @@ var testExpr = []struct { Include: []string{"bar"}, }, }, - }, { + }, + { input: "foo / ignoring(test,blub) group_left(blub) bar", expected: &BinaryExpr{ Op: DIV, @@ -1156,7 +1232,8 @@ var testExpr = []struct { Include: []string{"blub"}, }, }, - }, { + }, + { input: "foo / ignoring(test,blub) group_left(bar) bar", expected: &BinaryExpr{ Op: DIV, @@ -1186,7 +1263,8 @@ var testExpr = []struct { Include: []string{"bar"}, }, }, - }, { + }, + { input: "foo - on(test,blub) group_right(bar,foo) bar", expected: &BinaryExpr{ Op: SUB, @@ -1217,7 +1295,8 @@ var testExpr = []struct { On: true, }, }, - }, { + }, + { input: "foo - ignoring(test,blub) group_right(bar,foo) bar", expected: &BinaryExpr{ Op: SUB, @@ -1247,79 +1326,98 @@ var testExpr = []struct { Include: []string{"bar", "foo"}, }, }, - }, { + }, + { input: "foo and 1", fail: true, errMsg: "set operator \"and\" not allowed in binary scalar expression", - }, { + }, + { input: "1 and foo", fail: true, errMsg: "set operator \"and\" not allowed in binary scalar expression", - }, { + }, + { input: "foo or 1", fail: true, errMsg: "set operator \"or\" not allowed in binary scalar expression", - }, { + }, + { input: "1 or foo", fail: true, errMsg: "set operator \"or\" not allowed in binary scalar expression", - }, { + }, + { input: "foo unless 1", fail: true, errMsg: "set operator \"unless\" not allowed in binary scalar expression", - }, { + }, + { input: "1 unless 
foo", fail: true, errMsg: "set operator \"unless\" not allowed in binary scalar expression", - }, { + }, + { input: "1 or on(bar) foo", fail: true, errMsg: "vector matching only allowed between instant vectors", - }, { + }, + { input: "foo == on(bar) 10", fail: true, errMsg: "vector matching only allowed between instant vectors", - }, { + }, + { input: "foo + group_left(baz) bar", fail: true, errMsg: "unexpected ", - }, { + }, + { input: "foo and on(bar) group_left(baz) bar", fail: true, errMsg: "no grouping allowed for \"and\" operation", - }, { + }, + { input: "foo and on(bar) group_right(baz) bar", fail: true, errMsg: "no grouping allowed for \"and\" operation", - }, { + }, + { input: "foo or on(bar) group_left(baz) bar", fail: true, errMsg: "no grouping allowed for \"or\" operation", - }, { + }, + { input: "foo or on(bar) group_right(baz) bar", fail: true, errMsg: "no grouping allowed for \"or\" operation", - }, { + }, + { input: "foo unless on(bar) group_left(baz) bar", fail: true, errMsg: "no grouping allowed for \"unless\" operation", - }, { + }, + { input: "foo unless on(bar) group_right(baz) bar", fail: true, errMsg: "no grouping allowed for \"unless\" operation", - }, { + }, + { input: `http_requests{group="production"} + on(instance) group_left(job,instance) cpu_count{type="smp"}`, fail: true, errMsg: "label \"instance\" must not occur in ON and GROUP clause at once", - }, { + }, + { input: "foo + bool bar", fail: true, errMsg: "bool modifier can only be used on comparison operators", - }, { + }, + { input: "foo + bool 10", fail: true, errMsg: "bool modifier can only be used on comparison operators", - }, { + }, + { input: "foo and bool 10", fail: true, errMsg: "bool modifier can only be used on comparison operators", @@ -1337,7 +1435,8 @@ var testExpr = []struct { End: 3, }, }, - }, { + }, + { input: "min", expected: &VectorSelector{ Name: "min", @@ -1349,7 +1448,8 @@ var testExpr = []struct { End: 3, }, }, - }, { + }, + { input: "foo offset 5m", expected: &VectorSelector{ Name: "foo", @@ -1362,7 +1462,8 @@ var testExpr = []struct { End: 13, }, }, - }, { + }, + { input: "foo offset -7m", expected: &VectorSelector{ Name: "foo", @@ -1375,7 +1476,8 @@ var testExpr = []struct { End: 14, }, }, - }, { + }, + { input: `foo OFFSET 1h30m`, expected: &VectorSelector{ Name: "foo", @@ -1388,7 +1490,8 @@ var testExpr = []struct { End: 16, }, }, - }, { + }, + { input: `foo OFFSET 1m30ms`, expected: &VectorSelector{ Name: "foo", @@ -1401,7 +1504,8 @@ var testExpr = []struct { End: 17, }, }, - }, { + }, + { input: `foo @ 1603774568`, expected: &VectorSelector{ Name: "foo", @@ -1414,7 +1518,8 @@ var testExpr = []struct { End: 16, }, }, - }, { + }, + { input: `foo @ -100`, expected: &VectorSelector{ Name: "foo", @@ -1427,7 +1532,8 @@ var testExpr = []struct { End: 10, }, }, - }, { + }, + { input: `foo @ .3`, expected: &VectorSelector{ Name: "foo", @@ -1440,7 +1546,8 @@ var testExpr = []struct { End: 8, }, }, - }, { + }, + { input: `foo @ 3.`, expected: &VectorSelector{ Name: "foo", @@ -1453,7 +1560,8 @@ var testExpr = []struct { End: 8, }, }, - }, { + }, + { input: `foo @ 3.33`, expected: &VectorSelector{ Name: "foo", @@ -1466,7 +1574,8 @@ var testExpr = []struct { End: 10, }, }, - }, { // Rounding off. + }, + { // Rounding off. input: `foo @ 3.3333`, expected: &VectorSelector{ Name: "foo", @@ -1479,7 +1588,8 @@ var testExpr = []struct { End: 12, }, }, - }, { // Rounding off. + }, + { // Rounding off. 
input: `foo @ 3.3335`, expected: &VectorSelector{ Name: "foo", @@ -1492,7 +1602,8 @@ var testExpr = []struct { End: 12, }, }, - }, { + }, + { input: `foo @ 3e2`, expected: &VectorSelector{ Name: "foo", @@ -1505,7 +1616,8 @@ var testExpr = []struct { End: 9, }, }, - }, { + }, + { input: `foo @ 3e-1`, expected: &VectorSelector{ Name: "foo", @@ -1518,7 +1630,8 @@ var testExpr = []struct { End: 10, }, }, - }, { + }, + { input: `foo @ 0xA`, expected: &VectorSelector{ Name: "foo", @@ -1531,7 +1644,8 @@ var testExpr = []struct { End: 9, }, }, - }, { + }, + { input: `foo @ -3.3e1`, expected: &VectorSelector{ Name: "foo", @@ -1544,27 +1658,33 @@ var testExpr = []struct { End: 12, }, }, - }, { + }, + { input: `foo @ +Inf`, fail: true, errMsg: "1:1: parse error: timestamp out of bounds for @ modifier: +Inf", - }, { + }, + { input: `foo @ -Inf`, fail: true, errMsg: "1:1: parse error: timestamp out of bounds for @ modifier: -Inf", - }, { + }, + { input: `foo @ NaN`, fail: true, errMsg: "1:1: parse error: timestamp out of bounds for @ modifier: NaN", - }, { + }, + { input: fmt.Sprintf(`foo @ %f`, float64(math.MaxInt64)+1), fail: true, errMsg: fmt.Sprintf("1:1: parse error: timestamp out of bounds for @ modifier: %f", float64(math.MaxInt64)+1), - }, { + }, + { input: fmt.Sprintf(`foo @ %f`, float64(math.MinInt64)-1), fail: true, errMsg: fmt.Sprintf("1:1: parse error: timestamp out of bounds for @ modifier: %f", float64(math.MinInt64)-1), - }, { + }, + { input: `foo:bar{a="bc"}`, expected: &VectorSelector{ Name: "foo:bar", @@ -1577,7 +1697,8 @@ var testExpr = []struct { End: 15, }, }, - }, { + }, + { input: `foo{NaN='bc'}`, expected: &VectorSelector{ Name: "foo", @@ -1590,7 +1711,8 @@ var testExpr = []struct { End: 13, }, }, - }, { + }, + { input: `foo{bar='}'}`, expected: &VectorSelector{ Name: "foo", @@ -1603,7 +1725,8 @@ var testExpr = []struct { End: 12, }, }, - }, { + }, + { input: `foo{a="b", foo!="bar", test=~"test", bar!~"baz"}`, expected: &VectorSelector{ Name: "foo", @@ -1619,7 +1742,8 @@ var testExpr = []struct { End: 48, }, }, - }, { + }, + { input: `foo{a="b", foo!="bar", test=~"test", bar!~"baz",}`, expected: &VectorSelector{ Name: "foo", @@ -1635,89 +1759,110 @@ var testExpr = []struct { End: 49, }, }, - }, { + }, + { input: `{`, fail: true, errMsg: "unexpected end of input inside braces", - }, { + }, + { input: `}`, fail: true, errMsg: "unexpected character: '}'", - }, { + }, + { input: `some{`, fail: true, errMsg: "unexpected end of input inside braces", - }, { + }, + { input: `some}`, fail: true, errMsg: "unexpected character: '}'", - }, { + }, + { input: `some_metric{a=b}`, fail: true, errMsg: "unexpected identifier \"b\" in label matching, expected string", - }, { + }, + { input: `some_metric{a:b="b"}`, fail: true, errMsg: "unexpected character inside braces: ':'", - }, { + }, + { input: `foo{a*"b"}`, fail: true, errMsg: "unexpected character inside braces: '*'", - }, { + }, + { input: `foo{a>="b"}`, fail: true, // TODO(fabxc): willingly lexing wrong tokens allows for more precise error // messages from the parser - consider if this is an option. 
errMsg: "unexpected character inside braces: '>'", - }, { + }, + { input: "some_metric{a=\"\xff\"}", fail: true, errMsg: "1:15: parse error: invalid UTF-8 rune", - }, { + }, + { input: `foo{gibberish}`, fail: true, errMsg: `unexpected "}" in label matching, expected label matching operator`, - }, { + }, + { input: `foo{1}`, fail: true, errMsg: "unexpected character inside braces: '1'", - }, { + }, + { input: `{}`, fail: true, errMsg: "vector selector must contain at least one non-empty matcher", - }, { + }, + { input: `{x=""}`, fail: true, errMsg: "vector selector must contain at least one non-empty matcher", - }, { + }, + { input: `{x=~".*"}`, fail: true, errMsg: "vector selector must contain at least one non-empty matcher", - }, { + }, + { input: `{x!~".+"}`, fail: true, errMsg: "vector selector must contain at least one non-empty matcher", - }, { + }, + { input: `{x!="a"}`, fail: true, errMsg: "vector selector must contain at least one non-empty matcher", - }, { + }, + { input: `foo{__name__="bar"}`, fail: true, errMsg: `metric name must not be set twice: "foo" or "bar"`, - }, { + }, + { input: `foo{__name__= =}`, fail: true, errMsg: `1:15: parse error: unexpected "=" in label matching, expected string`, - }, { + }, + { input: `foo{,}`, fail: true, errMsg: `unexpected "," in label matching, expected identifier or "}"`, - }, { + }, + { input: `foo{__name__ == "bar"}`, fail: true, errMsg: `1:15: parse error: unexpected "=" in label matching, expected string`, - }, { + }, + { input: `foo{__name__="bar" lol}`, fail: true, errMsg: `unexpected identifier "lol" in label matching, expected "," or "}"`, @@ -1739,7 +1884,8 @@ var testExpr = []struct { Range: 5 * time.Second, EndPos: 8, }, - }, { + }, + { input: "test[5m]", expected: &MatrixSelector{ VectorSelector: &VectorSelector{ @@ -1755,7 +1901,8 @@ var testExpr = []struct { Range: 5 * time.Minute, EndPos: 8, }, - }, { + }, + { input: `foo[5m30s]`, expected: &MatrixSelector{ VectorSelector: &VectorSelector{ @@ -1771,7 +1918,8 @@ var testExpr = []struct { Range: 5*time.Minute + 30*time.Second, EndPos: 10, }, - }, { + }, + { input: "test[5h] OFFSET 5m", expected: &MatrixSelector{ VectorSelector: &VectorSelector{ @@ -1788,7 +1936,8 @@ var testExpr = []struct { Range: 5 * time.Hour, EndPos: 18, }, - }, { + }, + { input: "test[5d] OFFSET 10s", expected: &MatrixSelector{ VectorSelector: &VectorSelector{ @@ -1805,7 +1954,8 @@ var testExpr = []struct { Range: 5 * 24 * time.Hour, EndPos: 19, }, - }, { + }, + { input: "test[5w] offset 2w", expected: &MatrixSelector{ VectorSelector: &VectorSelector{ @@ -1822,7 +1972,8 @@ var testExpr = []struct { Range: 5 * 7 * 24 * time.Hour, EndPos: 18, }, - }, { + }, + { input: `test{a="b"}[5y] OFFSET 3d`, expected: &MatrixSelector{ VectorSelector: &VectorSelector{ @@ -1840,7 +1991,8 @@ var testExpr = []struct { Range: 5 * 365 * 24 * time.Hour, EndPos: 25, }, - }, { + }, + { input: `test{a="b"}[5y] @ 1603774699`, expected: &MatrixSelector{ VectorSelector: &VectorSelector{ @@ -1858,70 +2010,87 @@ var testExpr = []struct { Range: 5 * 365 * 24 * time.Hour, EndPos: 28, }, - }, { + }, + { input: `foo[5mm]`, fail: true, errMsg: "bad duration syntax: \"5mm\"", - }, { + }, + { input: `foo[5m1]`, fail: true, errMsg: "bad duration syntax: \"5m1\"", - }, { + }, + { input: `foo[5m:1m1]`, fail: true, errMsg: "bad number or duration syntax: \"1m1\"", - }, { + }, + { input: `foo[5y1hs]`, fail: true, errMsg: "not a valid duration string: \"5y1hs\"", - }, { + }, + { input: `foo[5m1h]`, fail: true, errMsg: "not a valid duration 
string: \"5m1h\"", - }, { + }, + { input: `foo[5m1m]`, fail: true, errMsg: "not a valid duration string: \"5m1m\"", - }, { + }, + { input: `foo[0m]`, fail: true, errMsg: "duration must be greater than 0", - }, { + }, + { input: `foo["5m"]`, fail: true, - }, { + }, + { input: `foo[]`, fail: true, errMsg: "missing unit character in duration", - }, { + }, + { input: `foo[1]`, fail: true, errMsg: "missing unit character in duration", - }, { + }, + { input: `some_metric[5m] OFFSET 1`, fail: true, errMsg: "unexpected number \"1\" in offset, expected duration", - }, { + }, + { input: `some_metric[5m] OFFSET 1mm`, fail: true, errMsg: "bad number or duration syntax: \"1mm\"", - }, { + }, + { input: `some_metric[5m] OFFSET`, fail: true, errMsg: "unexpected end of input in offset, expected duration", - }, { + }, + { input: `some_metric OFFSET 1m[5m]`, fail: true, errMsg: "1:22: parse error: no offset modifiers allowed before range", - }, { + }, + { input: `some_metric[5m] @ 1m`, fail: true, errMsg: "1:19: parse error: unexpected duration \"1m\" in @, expected timestamp", - }, { + }, + { input: `some_metric[5m] @`, fail: true, errMsg: "1:18: parse error: unexpected end of input in @, expected timestamp", - }, { + }, + { input: `some_metric @ 1234 [5m]`, fail: true, errMsg: "1:20: parse error: no @ modifiers allowed before range", @@ -1952,7 +2121,8 @@ var testExpr = []struct { End: 25, }, }, - }, { + }, + { input: "avg by (foo)(some_metric)", expected: &AggregateExpr{ Op: AVG, @@ -1972,7 +2142,8 @@ var testExpr = []struct { End: 25, }, }, - }, { + }, + { input: "max by (foo)(some_metric)", expected: &AggregateExpr{ Op: MAX, @@ -1992,7 +2163,8 @@ var testExpr = []struct { End: 25, }, }, - }, { + }, + { input: "sum without (foo) (some_metric)", expected: &AggregateExpr{ Op: SUM, @@ -2013,7 +2185,8 @@ var testExpr = []struct { End: 31, }, }, - }, { + }, + { input: "sum (some_metric) without (foo)", expected: &AggregateExpr{ Op: SUM, @@ -2034,7 +2207,8 @@ var testExpr = []struct { End: 31, }, }, - }, { + }, + { input: "stddev(some_metric)", expected: &AggregateExpr{ Op: STDDEV, @@ -2053,7 +2227,8 @@ var testExpr = []struct { End: 19, }, }, - }, { + }, + { input: "stdvar by (foo)(some_metric)", expected: &AggregateExpr{ Op: STDVAR, @@ -2073,7 +2248,8 @@ var testExpr = []struct { End: 28, }, }, - }, { + }, + { input: "sum by ()(some_metric)", expected: &AggregateExpr{ Op: SUM, @@ -2093,7 +2269,8 @@ var testExpr = []struct { End: 22, }, }, - }, { + }, + { input: "sum by (foo,bar,)(some_metric)", expected: &AggregateExpr{ Op: SUM, @@ -2113,7 +2290,8 @@ var testExpr = []struct { End: 30, }, }, - }, { + }, + { input: "sum by (foo,)(some_metric)", expected: &AggregateExpr{ Op: SUM, @@ -2133,7 +2311,8 @@ var testExpr = []struct { End: 26, }, }, - }, { + }, + { input: "topk(5, some_metric)", expected: &AggregateExpr{ Op: TOPK, @@ -2159,7 +2338,8 @@ var testExpr = []struct { End: 20, }, }, - }, { + }, + { input: `count_values("value", some_metric)`, expected: &AggregateExpr{ Op: COUNT_VALUES, @@ -2185,7 +2365,8 @@ var testExpr = []struct { End: 34, }, }, - }, { + }, + { // Test usage of keywords as label names. 
input: "sum without(and, by, avg, count, alert, annotations)(some_metric)", expected: &AggregateExpr{ @@ -2207,67 +2388,83 @@ var testExpr = []struct { End: 65, }, }, - }, { + }, + { input: "sum without(==)(some_metric)", fail: true, errMsg: "unexpected in grouping opts, expected label", - }, { + }, + { input: "sum without(,)(some_metric)", fail: true, errMsg: `unexpected "," in grouping opts, expected label`, - }, { + }, + { input: "sum without(foo,,)(some_metric)", fail: true, errMsg: `unexpected "," in grouping opts, expected label`, - }, { + }, + { input: `sum some_metric by (test)`, fail: true, errMsg: "unexpected identifier \"some_metric\"", - }, { + }, + { input: `sum (some_metric) by test`, fail: true, errMsg: "unexpected identifier \"test\" in grouping opts", - }, { + }, + { input: `sum (some_metric) by test`, fail: true, errMsg: "unexpected identifier \"test\" in grouping opts", - }, { + }, + { input: `sum () by (test)`, fail: true, errMsg: "no arguments for aggregate expression provided", - }, { + }, + { input: "MIN keep_common (some_metric)", fail: true, errMsg: "1:5: parse error: unexpected identifier \"keep_common\"", - }, { + }, + { input: "MIN (some_metric) keep_common", fail: true, errMsg: `unexpected identifier "keep_common"`, - }, { + }, + { input: `sum (some_metric) without (test) by (test)`, fail: true, errMsg: "unexpected ", - }, { + }, + { input: `sum without (test) (some_metric) by (test)`, fail: true, errMsg: "unexpected ", - }, { + }, + { input: `topk(some_metric)`, fail: true, errMsg: "wrong number of arguments for aggregate expression provided, expected 2, got 1", - }, { + }, + { input: `topk(some_metric,)`, fail: true, errMsg: "trailing commas not allowed in function call args", - }, { + }, + { input: `topk(some_metric, other_metric)`, fail: true, errMsg: "1:6: parse error: expected type scalar in aggregation parameter, got instant vector", - }, { + }, + { input: `count_values(5, other_metric)`, fail: true, errMsg: "1:14: parse error: expected type string in aggregation parameter, got scalar", - }, { + }, + { input: `rate(some_metric[5m]) @ 1234`, fail: true, errMsg: "1:1: parse error: @ modifier must be preceded by an instant vector selector or range vector selector or a subquery", @@ -2283,7 +2480,8 @@ var testExpr = []struct { End: 6, }, }, - }, { + }, + { input: `floor(some_metric{foo!="bar"})`, expected: &Call{ Func: MustGetFunction("floor"), @@ -2305,7 +2503,8 @@ var testExpr = []struct { End: 30, }, }, - }, { + }, + { input: "rate(some_metric[5m])", expected: &Call{ Func: MustGetFunction("rate"), @@ -2330,7 +2529,8 @@ var testExpr = []struct { End: 21, }, }, - }, { + }, + { input: "round(some_metric)", expected: &Call{ Func: MustGetFunction("round"), @@ -2351,7 +2551,8 @@ var testExpr = []struct { End: 18, }, }, - }, { + }, + { input: "round(some_metric, 5)", expected: &Call{ Func: MustGetFunction("round"), @@ -2379,39 +2580,48 @@ var testExpr = []struct { End: 21, }, }, - }, { + }, + { input: "floor()", fail: true, errMsg: "expected 1 argument(s) in call to \"floor\", got 0", - }, { + }, + { input: "floor(some_metric, other_metric)", fail: true, errMsg: "expected 1 argument(s) in call to \"floor\", got 2", - }, { + }, + { input: "floor(some_metric, 1)", fail: true, errMsg: "expected 1 argument(s) in call to \"floor\", got 2", - }, { + }, + { input: "floor(1)", fail: true, errMsg: "expected type instant vector in call to function \"floor\", got scalar", - }, { + }, + { input: "hour(some_metric, some_metric, some_metric)", fail: true, errMsg: "expected at 
most 1 argument(s) in call to \"hour\", got 3", - }, { + }, + { input: "time(some_metric)", fail: true, errMsg: "expected 0 argument(s) in call to \"time\", got 1", - }, { + }, + { input: "non_existent_function_far_bar()", fail: true, errMsg: "unknown function with name \"non_existent_function_far_bar\"", - }, { + }, + { input: "rate(some_metric)", fail: true, errMsg: "expected type range vector in call to function \"rate\", got instant vector", - }, { + }, + { input: "label_replace(a, `b`, `c\xff`, `d`, `.*`)", fail: true, errMsg: "1:23: parse error: invalid UTF-8 rune", @@ -2421,28 +2631,34 @@ var testExpr = []struct { input: "-=", fail: true, errMsg: `unexpected "="`, - }, { + }, + { input: "++-++-+-+-<", fail: true, errMsg: `unexpected `, - }, { + }, + { input: "e-+=/(0)", fail: true, errMsg: `unexpected "="`, - }, { + }, + { input: "a>b()", fail: true, errMsg: `unknown function`, - }, { + }, + { input: "rate(avg)", fail: true, errMsg: `expected type range vector`, - }, { + }, + { // This is testing that we are not re-rendering the expression string for each error, which would timeout. input: "(" + strings.Repeat("-{}-1", 10000) + ")" + strings.Repeat("[1m:]", 1000), fail: true, errMsg: `1:3: parse error: vector selector must contain at least one non-empty matcher`, - }, { + }, + { input: "sum(sum)", expected: &AggregateExpr{ Op: SUM, @@ -2461,7 +2677,8 @@ var testExpr = []struct { End: 8, }, }, - }, { + }, + { input: "a + sum", expected: &BinaryExpr{ Op: ADD, @@ -2495,49 +2712,58 @@ var testExpr = []struct { Val: "double-quoted string \" with escaped quote", PosRange: PositionRange{Start: 0, End: 44}, }, - }, { + }, + { input: `'single-quoted string \' with escaped quote'`, expected: &StringLiteral{ Val: "single-quoted string ' with escaped quote", PosRange: PositionRange{Start: 0, End: 44}, }, - }, { + }, + { input: "`backtick-quoted string`", expected: &StringLiteral{ Val: "backtick-quoted string", PosRange: PositionRange{Start: 0, End: 24}, }, - }, { + }, + { input: `"\a\b\f\n\r\t\v\\\" - \xFF\377\u1234\U00010111\U0001011111☺"`, expected: &StringLiteral{ Val: "\a\b\f\n\r\t\v\\\" - \xFF\377\u1234\U00010111\U0001011111☺", PosRange: PositionRange{Start: 0, End: 62}, }, - }, { + }, + { input: `'\a\b\f\n\r\t\v\\\' - \xFF\377\u1234\U00010111\U0001011111☺'`, expected: &StringLiteral{ Val: "\a\b\f\n\r\t\v\\' - \xFF\377\u1234\U00010111\U0001011111☺", PosRange: PositionRange{Start: 0, End: 62}, }, - }, { + }, + { input: "`" + `\a\b\f\n\r\t\v\\\"\' - \xFF\377\u1234\U00010111\U0001011111☺` + "`", expected: &StringLiteral{ Val: `\a\b\f\n\r\t\v\\\"\' - \xFF\377\u1234\U00010111\U0001011111☺`, PosRange: PositionRange{Start: 0, End: 64}, }, - }, { + }, + { input: "`\\``", fail: true, errMsg: "unterminated raw string", - }, { + }, + { input: `"\`, fail: true, errMsg: "escape sequence not terminated", - }, { + }, + { input: `"\c"`, fail: true, errMsg: "unknown escape sequence U+0063 'c'", - }, { + }, + { input: `"\x."`, fail: true, errMsg: "illegal character U+002E '.' 
in escape sequence", @@ -2580,7 +2806,8 @@ var testExpr = []struct { Step: time.Hour + 6*time.Millisecond, EndPos: 27, }, - }, { + }, + { input: `foo[10m:]`, expected: &SubqueryExpr{ Expr: &VectorSelector{ @@ -2596,7 +2823,8 @@ var testExpr = []struct { Range: 10 * time.Minute, EndPos: 9, }, - }, { + }, + { input: `min_over_time(rate(foo{bar="baz"}[2s])[5m:5s])`, expected: &Call{ Func: MustGetFunction("min_over_time"), @@ -2637,7 +2865,8 @@ var testExpr = []struct { End: 46, }, }, - }, { + }, + { input: `min_over_time(rate(foo{bar="baz"}[2s])[5m:])[4m:3s]`, expected: &SubqueryExpr{ Expr: &Call{ @@ -2681,7 +2910,8 @@ var testExpr = []struct { Step: 3 * time.Second, EndPos: 51, }, - }, { + }, + { input: `min_over_time(rate(foo{bar="baz"}[2s])[5m:] offset 4m)[4m:3s]`, expected: &SubqueryExpr{ Expr: &Call{ @@ -2726,7 +2956,8 @@ var testExpr = []struct { Step: 3 * time.Second, EndPos: 61, }, - }, { + }, + { input: `min_over_time(rate(foo{bar="baz"}[2s])[5m:] @ 1603775091)[4m:3s]`, expected: &SubqueryExpr{ Expr: &Call{ @@ -2771,7 +3002,8 @@ var testExpr = []struct { Step: 3 * time.Second, EndPos: 64, }, - }, { + }, + { input: `min_over_time(rate(foo{bar="baz"}[2s])[5m:] @ -160377509)[4m:3s]`, expected: &SubqueryExpr{ Expr: &Call{ @@ -2816,7 +3048,8 @@ var testExpr = []struct { Step: 3 * time.Second, EndPos: 64, }, - }, { + }, + { input: "sum without(and, by, avg, count, alert, annotations)(some_metric) [30m:10s]", expected: &SubqueryExpr{ Expr: &AggregateExpr{ @@ -2842,7 +3075,8 @@ var testExpr = []struct { Step: 10 * time.Second, EndPos: 75, }, - }, { + }, + { input: `some_metric OFFSET 1m [10m:5s]`, expected: &SubqueryExpr{ Expr: &VectorSelector{ @@ -2860,7 +3094,8 @@ var testExpr = []struct { Step: 5 * time.Second, EndPos: 30, }, - }, { + }, + { input: `some_metric @ 123 [10m:5s]`, expected: &SubqueryExpr{ Expr: &VectorSelector{ @@ -2878,7 +3113,8 @@ var testExpr = []struct { Step: 5 * time.Second, EndPos: 26, }, - }, { + }, + { input: `some_metric @ 123 offset 1m [10m:5s]`, expected: &SubqueryExpr{ Expr: &VectorSelector{ @@ -2897,7 +3133,8 @@ var testExpr = []struct { Step: 5 * time.Second, EndPos: 36, }, - }, { + }, + { input: `some_metric offset 1m @ 123 [10m:5s]`, expected: &SubqueryExpr{ Expr: &VectorSelector{ @@ -2916,7 +3153,8 @@ var testExpr = []struct { Step: 5 * time.Second, EndPos: 36, }, - }, { + }, + { input: `some_metric[10m:5s] offset 1m @ 123`, expected: &SubqueryExpr{ Expr: &VectorSelector{ @@ -2935,7 +3173,8 @@ var testExpr = []struct { Step: 5 * time.Second, EndPos: 35, }, - }, { + }, + { input: `(foo + bar{nm="val"})[5m:]`, expected: &SubqueryExpr{ Expr: &ParenExpr{ @@ -2974,7 +3213,8 @@ var testExpr = []struct { Range: 5 * time.Minute, EndPos: 26, }, - }, { + }, + { input: `(foo + bar{nm="val"})[5m:] offset 10m`, expected: &SubqueryExpr{ Expr: &ParenExpr{ @@ -3014,7 +3254,8 @@ var testExpr = []struct { OriginalOffset: 10 * time.Minute, EndPos: 37, }, - }, { + }, + { input: `(foo + bar{nm="val"} @ 1234)[5m:] @ 1603775019`, expected: &SubqueryExpr{ Expr: &ParenExpr{ @@ -3055,19 +3296,23 @@ var testExpr = []struct { Timestamp: makeInt64Pointer(1603775019000), EndPos: 46, }, - }, { + }, + { input: "test[5d] OFFSET 10s [10m:5s]", fail: true, errMsg: "1:1: parse error: subquery is only allowed on instant vector, got matrix", - }, { + }, + { input: `(foo + bar{nm="val"})[5m:][10m:5s]`, fail: true, errMsg: `1:1: parse error: subquery is only allowed on instant vector, got matrix`, - }, { + }, + { input: "rate(food[1m])[1h] offset 1h", fail: true, errMsg: `1:15: parse error: 
ranges only allowed for vector selectors`, - }, { + }, + { input: "rate(food[1m])[1h] @ 100", fail: true, errMsg: `1:15: parse error: ranges only allowed for vector selectors`, @@ -3086,7 +3331,8 @@ var testExpr = []struct { End: 13, }, }, - }, { + }, + { input: `foo @ end()`, expected: &VectorSelector{ Name: "foo", @@ -3099,7 +3345,8 @@ var testExpr = []struct { End: 11, }, }, - }, { + }, + { input: `test[5y] @ start()`, expected: &MatrixSelector{ VectorSelector: &VectorSelector{ @@ -3116,7 +3363,8 @@ var testExpr = []struct { Range: 5 * 365 * 24 * time.Hour, EndPos: 18, }, - }, { + }, + { input: `test[5y] @ end()`, expected: &MatrixSelector{ VectorSelector: &VectorSelector{ @@ -3133,7 +3381,8 @@ var testExpr = []struct { Range: 5 * 365 * 24 * time.Hour, EndPos: 16, }, - }, { + }, + { input: `foo[10m:6s] @ start()`, expected: &SubqueryExpr{ Expr: &VectorSelector{ @@ -3151,7 +3400,8 @@ var testExpr = []struct { StartOrEnd: START, EndPos: 21, }, - }, { + }, + { input: `foo[10m:6s] @ end()`, expected: &SubqueryExpr{ Expr: &VectorSelector{ @@ -3169,11 +3419,13 @@ var testExpr = []struct { StartOrEnd: END, EndPos: 19, }, - }, { + }, + { input: `start()`, fail: true, errMsg: `1:6: parse error: unexpected "("`, - }, { + }, + { input: `end()`, fail: true, errMsg: `1:4: parse error: unexpected "("`, @@ -3191,7 +3443,8 @@ var testExpr = []struct { End: 5, }, }, - }, { + }, + { input: `end`, expected: &VectorSelector{ Name: "end", @@ -3203,7 +3456,8 @@ var testExpr = []struct { End: 3, }, }, - }, { + }, + { input: `start{end="foo"}`, expected: &VectorSelector{ Name: "start", @@ -3216,7 +3470,8 @@ var testExpr = []struct { End: 16, }, }, - }, { + }, + { input: `end{start="foo"}`, expected: &VectorSelector{ Name: "end", @@ -3229,7 +3484,8 @@ var testExpr = []struct { End: 16, }, }, - }, { + }, + { input: `foo unless on(start) bar`, expected: &BinaryExpr{ Op: LUNLESS, @@ -3259,7 +3515,8 @@ var testExpr = []struct { On: true, }, }, - }, { + }, + { input: `foo unless on(end) bar`, expected: &BinaryExpr{ Op: LUNLESS, diff --git a/promql/query_logger.go b/promql/query_logger.go index cf2fbbfcc..ecf93765c 100644 --- a/promql/query_logger.go +++ b/promql/query_logger.go @@ -81,8 +81,7 @@ func logUnfinishedQueries(filename string, filesize int, logger log.Logger) { } func getMMapedFile(filename string, filesize int, logger log.Logger) ([]byte, error) { - - file, err := os.OpenFile(filename, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0666) + file, err := os.OpenFile(filename, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0o666) if err != nil { level.Error(logger).Log("msg", "Error opening query log file", "file", filename, "err", err) return nil, err @@ -104,7 +103,7 @@ func getMMapedFile(filename string, filesize int, logger log.Logger) ([]byte, er } func NewActiveQueryTracker(localStoragePath string, maxConcurrent int, logger log.Logger) *ActiveQueryTracker { - err := os.MkdirAll(localStoragePath, 0777) + err := os.MkdirAll(localStoragePath, 0o777) if err != nil { level.Error(logger).Log("msg", "Failed to create directory for logging active queries") } @@ -147,7 +146,6 @@ func trimStringByBytes(str string, size int) string { func _newJSONEntry(query string, timestamp int64, logger log.Logger) []byte { entry := Entry{query, timestamp} jsonEntry, err := json.Marshal(entry) - if err != nil { level.Error(logger).Log("msg", "Cannot create json of query", "query", query) return []byte{} diff --git a/rules/alerting.go b/rules/alerting.go index 28abcf2a5..524d80afe 100644 --- a/rules/alerting.go +++ b/rules/alerting.go @@ -311,7 +311,7 
@@ func (r *AlertingRule) Eval(ctx context.Context, ts time.Time, query QueryFunc, resultFPs := map[uint64]struct{}{} var vec promql.Vector - var alerts = make(map[uint64]*Alert, len(res)) + alerts := make(map[uint64]*Alert, len(res)) for _, smpl := range res { // Provide the alert information to the template. l := make(map[string]string, len(smpl.Metric)) @@ -479,7 +479,7 @@ func (r *AlertingRule) ForEachActiveAlert(f func(*Alert)) { } } -func (r *AlertingRule) sendAlerts(ctx context.Context, ts time.Time, resendDelay time.Duration, interval time.Duration, notifyFunc NotifyFunc) { +func (r *AlertingRule) sendAlerts(ctx context.Context, ts time.Time, resendDelay, interval time.Duration, notifyFunc NotifyFunc) { alerts := []*Alert{} r.ForEachActiveAlert(func(alert *Alert) { if alert.needsSending(ts, resendDelay) { diff --git a/rules/manager.go b/rules/manager.go index fa8cd6763..2ab7eb62d 100644 --- a/rules/manager.go +++ b/rules/manager.go @@ -834,12 +834,10 @@ func (g *Group) RestoreForState(ts time.Time) { level.Debug(g.logger).Log("msg", "'for' state restored", labels.AlertName, alertRule.Name(), "restored_time", a.ActiveAt.Format(time.RFC850), "labels", a.Labels.String()) - }) alertRule.SetRestored(true) } - } // Equals return if two groups are the same. diff --git a/rules/manager_test.go b/rules/manager_test.go index f3b335070..b70409842 100644 --- a/rules/manager_test.go +++ b/rules/manager_test.go @@ -119,17 +119,19 @@ func TestAlertingRule(t *testing.T) { baseTime := time.Unix(0, 0) - var tests = []struct { + tests := []struct { time time.Duration result promql.Vector }{ { time: 0, result: result[:2], - }, { + }, + { time: 5 * time.Minute, result: result[2:], - }, { + }, + { time: 10 * time.Minute, result: result[2:3], }, @@ -256,7 +258,7 @@ func TestForStateAddSamples(t *testing.T) { baseTime := time.Unix(0, 0) - var tests = []struct { + tests := []struct { time time.Duration result promql.Vector persistThisTime bool // If true, it means this 'time' is persisted for 'for'. @@ -769,7 +771,6 @@ func TestUpdate(t *testing.T) { } else { rgs.Groups[i].Interval = model.Duration(10) } - } reloadAndValidate(rgs, t, tmpFile, ruleManager, expected, ogs) diff --git a/scrape/scrape_test.go b/scrape/scrape_test.go index 1e0c68d6d..3b34f1416 100644 --- a/scrape/scrape_test.go +++ b/scrape/scrape_test.go @@ -109,7 +109,6 @@ func TestDroppedTargetsList(t *testing.T) { // TestDiscoveredLabelsUpdate checks that DiscoveredLabels are updated // even when new labels don't affect the target `hash`. func TestDiscoveredLabelsUpdate(t *testing.T) { - sp := &scrapePool{} // These are used when syncing so need this to avoid a panic. 
sp.config = &config.ScrapeConfig{ @@ -350,7 +349,7 @@ func TestScrapePoolTargetLimit(t *testing.T) { client: http.DefaultClient, } - var tgs = []*targetgroup.Group{} + tgs := []*targetgroup.Group{} for i := 0; i < 50; i++ { tgs = append(tgs, &targetgroup.Group{ @@ -1000,6 +999,7 @@ func BenchmarkScrapeLoopAppend(b *testing.B) { _, _, _, _ = sl.append(slApp, metrics, "", ts) } } + func BenchmarkScrapeLoopAppendOM(b *testing.B) { ctx, sl := simpleTestScrapeLoop(b) @@ -1409,8 +1409,10 @@ func TestScrapeLoopAppendForConflictingPrefixedLabels(t *testing.T) { "Two target labels collide with existing labels, both with and without prefix 'exported'": { targetLabels: []string{"foo", "3", "exported_foo", "4"}, exposedLabels: `metric{foo="1" exported_foo="2"} 0`, - expected: []string{"__name__", "metric", "exported_exported_foo", "1", "exported_exported_exported_foo", - "2", "exported_foo", "4", "foo", "3"}, + expected: []string{ + "__name__", "metric", "exported_exported_foo", "1", "exported_exported_exported_foo", + "2", "exported_foo", "4", "foo", "3", + }, }, "Extreme example": { targetLabels: []string{"foo", "0", "exported_exported_foo", "1", "exported_exported_exported_foo", "2"}, @@ -1743,7 +1745,8 @@ func TestScrapeLoopAppendExemplar(t *testing.T) { exemplars: []exemplar.Exemplar{ {Labels: labels.FromStrings("a", "abc"), Value: 1}, }, - }, { + }, + { title: "Metric with exemplars and TS", scrapeText: "metric_total{n=\"1\"} 0 # {a=\"abc\"} 1.0 10000\n# EOF", discoveryLabels: []string{"n", "2"}, @@ -1754,7 +1757,8 @@ func TestScrapeLoopAppendExemplar(t *testing.T) { exemplars: []exemplar.Exemplar{ {Labels: labels.FromStrings("a", "abc"), Value: 1, Ts: 10000000, HasTs: true}, }, - }, { + }, + { title: "Two metrics and exemplars", scrapeText: `metric_total{n="1"} 1 # {t="1"} 1.0 10000 metric_total{n="2"} 2 # {t="2"} 2.0 20000 @@ -2040,7 +2044,6 @@ func TestScrapeLoopOutOfBoundsTimeError(t *testing.T) { require.Equal(t, 1, total) require.Equal(t, 1, added) require.Equal(t, 0, seriesAdded) - } func TestTargetScraperScrapeOK(t *testing.T) { diff --git a/storage/buffer.go b/storage/buffer.go index feca1d91e..68cda5f94 100644 --- a/storage/buffer.go +++ b/storage/buffer.go @@ -275,7 +275,7 @@ func (r *sampleRing) nthLast(n int) (int64, float64, bool) { func (r *sampleRing) samples() []sample { res := make([]sample, r.l) - var k = r.f + r.l + k := r.f + r.l var j int if k > len(r.buf) { k = len(r.buf) diff --git a/storage/merge.go b/storage/merge.go index 2a68ad96a..49757e2c3 100644 --- a/storage/merge.go +++ b/storage/merge.go @@ -42,7 +42,7 @@ type mergeGenericQuerier struct { // See NewFanout commentary to learn more about primary vs secondary differences. // // In case of overlaps between the data given by primaries' and secondaries' Selects, merge function will be used. -func NewMergeQuerier(primaries []Querier, secondaries []Querier, mergeFn VerticalSeriesMergeFunc) Querier { +func NewMergeQuerier(primaries, secondaries []Querier, mergeFn VerticalSeriesMergeFunc) Querier { queriers := make([]genericQuerier, 0, len(primaries)+len(secondaries)) for _, q := range primaries { if _, ok := q.(noopQuerier); !ok && q != nil { @@ -71,7 +71,7 @@ func NewMergeQuerier(primaries []Querier, secondaries []Querier, mergeFn Vertica // // In case of overlaps between the data given by primaries' and secondaries' Selects, merge function will be used. // TODO(bwplotka): Currently merge will compact overlapping chunks with bigger chunk, without limit. 
Split it: https://github.com/prometheus/tsdb/issues/670 -func NewMergeChunkQuerier(primaries []ChunkQuerier, secondaries []ChunkQuerier, mergeFn VerticalChunkSeriesMergeFunc) ChunkQuerier { +func NewMergeChunkQuerier(primaries, secondaries []ChunkQuerier, mergeFn VerticalChunkSeriesMergeFunc) ChunkQuerier { queriers := make([]genericQuerier, 0, len(primaries)+len(secondaries)) for _, q := range primaries { if _, ok := q.(noopChunkQuerier); !ok && q != nil { @@ -104,7 +104,7 @@ func (q *mergeGenericQuerier) Select(sortSeries bool, hints *SelectHints, matche return q.queriers[0].Select(sortSeries, hints, matchers...) } - var seriesSets = make([]genericSeriesSet, 0, len(q.queriers)) + seriesSets := make([]genericSeriesSet, 0, len(q.queriers)) if !q.concurrentSelect { for _, querier := range q.queriers { // We need to sort for merge to work. @@ -265,7 +265,6 @@ func NewMergeSeriesSet(sets []SeriesSet, mergeFunc VerticalSeriesMergeFunc) Seri genericSets := make([]genericSeriesSet, 0, len(sets)) for _, s := range sets { genericSets = append(genericSets, &genericSeriesSetAdapter{s}) - } return &seriesSetAdapter{newGenericMergeSeriesSet(genericSets, (&seriesMergerAdapter{VerticalSeriesMergeFunc: mergeFunc}).Merge)} } @@ -281,7 +280,6 @@ func NewMergeChunkSeriesSet(sets []ChunkSeriesSet, mergeFunc VerticalChunkSeries genericSets := make([]genericSeriesSet, 0, len(sets)) for _, s := range sets { genericSets = append(genericSets, &genericChunkSeriesSetAdapter{s}) - } return &chunkSeriesSetAdapter{newGenericMergeSeriesSet(genericSets, (&chunkSeriesMergerAdapter{VerticalChunkSeriesMergeFunc: mergeFunc}).Merge)} } diff --git a/storage/merge_test.go b/storage/merge_test.go index 23eab0f70..0aabcdb96 100644 --- a/storage/merge_test.go +++ b/storage/merge_test.go @@ -616,7 +616,8 @@ func TestChainSampleIterator(t *testing.T) { NewListSeriesIterator(samples{sample{2, 2}, sample{5, 5}}), }, expected: []tsdbutil.Sample{ - sample{0, 0}, sample{1, 1}, sample{2, 2}, sample{3, 3}, sample{4, 4}, sample{5, 5}}, + sample{0, 0}, sample{1, 1}, sample{2, 2}, sample{3, 3}, sample{4, 4}, sample{5, 5}, + }, }, // Overlap. { diff --git a/storage/remote/queue_manager.go b/storage/remote/queue_manager.go index 9ac3b926b..8664555a5 100644 --- a/storage/remote/queue_manager.go +++ b/storage/remote/queue_manager.go @@ -728,7 +728,7 @@ func (t *QueueManager) releaseLabels(ls labels.Labels) { // processExternalLabels merges externalLabels into ls. If ls contains // a label in externalLabels, the value in ls wins. 
-func processExternalLabels(ls labels.Labels, externalLabels labels.Labels) labels.Labels { +func processExternalLabels(ls, externalLabels labels.Labels) labels.Labels { i, j, result := 0, 0, make(labels.Labels, 0, len(ls)+len(externalLabels)) for i < len(ls) && j < len(externalLabels) { if ls[i].Name < externalLabels[j].Name { @@ -1048,7 +1048,7 @@ func (s *shards) runShard(ctx context.Context, shardID int, queue chan interface max += int(float64(max) * 0.1) } - var pendingData = make([]prompb.TimeSeries, max) + pendingData := make([]prompb.TimeSeries, max) for i := range pendingData { pendingData[i].Samples = []prompb.Sample{{}} if s.qm.sendExemplars { @@ -1142,7 +1142,7 @@ func (s *shards) runShard(ctx context.Context, shardID int, queue chan interface } } -func (s *shards) sendSamples(ctx context.Context, samples []prompb.TimeSeries, sampleCount int, exemplarCount int, pBuf *proto.Buffer, buf *[]byte) { +func (s *shards) sendSamples(ctx context.Context, samples []prompb.TimeSeries, sampleCount, exemplarCount int, pBuf *proto.Buffer, buf *[]byte) { begin := time.Now() err := s.sendSamplesWithBackoff(ctx, samples, sampleCount, exemplarCount, pBuf, buf) if err != nil { @@ -1159,7 +1159,7 @@ func (s *shards) sendSamples(ctx context.Context, samples []prompb.TimeSeries, s } // sendSamples to the remote storage with backoff for recoverable errors. -func (s *shards) sendSamplesWithBackoff(ctx context.Context, samples []prompb.TimeSeries, sampleCount int, exemplarCount int, pBuf *proto.Buffer, buf *[]byte) error { +func (s *shards) sendSamplesWithBackoff(ctx context.Context, samples []prompb.TimeSeries, sampleCount, exemplarCount int, pBuf *proto.Buffer, buf *[]byte) error { // Build the WriteRequest with no metadata. req, highest, err := buildWriteRequest(samples, nil, pBuf, *buf) if err != nil { diff --git a/storage/remote/queue_manager_test.go b/storage/remote/queue_manager_test.go index f56b6d90c..9a07f7c99 100644 --- a/storage/remote/queue_manager_test.go +++ b/storage/remote/queue_manager_test.go @@ -60,7 +60,6 @@ func newHighestTimestampMetric() *maxTimestamp { } func TestSampleDelivery(t *testing.T) { - testcases := []struct { name string samples bool @@ -107,7 +106,6 @@ func TestSampleDelivery(t *testing.T) { for _, tc := range testcases { t.Run(tc.name, func(t *testing.T) { - var ( series []record.RefSeries samples []record.RefSample @@ -715,7 +713,7 @@ func BenchmarkSampleDelivery(b *testing.B) { const numSeries = 10000 // Extra labels to make a more realistic workload - taken from Kubernetes' embedded cAdvisor metrics. 
- var extraLabels = labels.Labels{ + extraLabels := labels.Labels{ {Name: "kubernetes_io_arch", Value: "amd64"}, {Name: "kubernetes_io_instance_type", Value: "c3.somesize"}, {Name: "kubernetes_io_os", Value: "linux"}, diff --git a/storage/remote/read_test.go b/storage/remote/read_test.go index 646d00c46..67c0f6f74 100644 --- a/storage/remote/read_test.go +++ b/storage/remote/read_test.go @@ -506,7 +506,6 @@ func TestSampleAndChunkQueryableClient(t *testing.T) { } require.NoError(t, ss.Err()) require.Equal(t, tc.expectedSeries, got) - }) } } diff --git a/storage/remote/write_handler.go b/storage/remote/write_handler.go index 92637cf47..ba8b3446b 100644 --- a/storage/remote/write_handler.go +++ b/storage/remote/write_handler.go @@ -81,9 +81,7 @@ func (h *writeHandler) checkAppendExemplarError(err error, e exemplar.Exemplar, } func (h *writeHandler) write(ctx context.Context, req *prompb.WriteRequest) (err error) { - var ( - outOfOrderExemplarErrs = 0 - ) + outOfOrderExemplarErrs := 0 app := h.appendable.Appender(ctx) defer func() { diff --git a/template/template.go b/template/template.go index dca5aa432..a8abda8f0 100644 --- a/template/template.go +++ b/template/template.go @@ -87,7 +87,7 @@ func query(ctx context.Context, q string, ts time.Time, queryFn QueryFunc) (quer // promql.Vector is hard to work with in templates, so convert to // base data types. // TODO(fabxc): probably not true anymore after type rework. - var result = make(queryResult, len(vector)) + result := make(queryResult, len(vector)) for n, v := range vector { s := sample{ Value: v.V, @@ -301,7 +301,7 @@ func NewTemplateExpander( } // AlertTemplateData returns the interface to be used in expanding the template. -func AlertTemplateData(labels map[string]string, externalLabels map[string]string, externalURL string, value float64) interface{} { +func AlertTemplateData(labels, externalLabels map[string]string, externalURL string, value float64) interface{} { return struct { Labels map[string]string ExternalLabels map[string]string diff --git a/template/template_test.go b/template/template_test.go index 6c67789ad..8cb657867 100644 --- a/template/template_test.go +++ b/template/template_test.go @@ -87,7 +87,8 @@ func TestTemplateExpansion(t *testing.T) { { Metric: labels.FromStrings(labels.MetricName, "metric", "instance", "a"), Point: promql.Point{T: 0, V: 11}, - }}, + }, + }, output: "11", }, { @@ -98,7 +99,8 @@ func TestTemplateExpansion(t *testing.T) { { Metric: labels.FromStrings(labels.MetricName, "metric", "instance", "a"), Point: promql.Point{T: 0, V: 11}, - }}, + }, + }, output: "a", }, { @@ -108,7 +110,8 @@ func TestTemplateExpansion(t *testing.T) { { Metric: labels.FromStrings(labels.MetricName, "metric", "__value__", "a"), Point: promql.Point{T: 0, V: 11}, - }}, + }, + }, output: "a", }, { @@ -118,7 +121,8 @@ func TestTemplateExpansion(t *testing.T) { { Metric: labels.FromStrings(labels.MetricName, "metric", "instance", "a"), Point: promql.Point{T: 0, V: 11}, - }}, + }, + }, output: "", }, { @@ -128,7 +132,8 @@ func TestTemplateExpansion(t *testing.T) { { Metric: labels.FromStrings(labels.MetricName, "metric", "instance", "a"), Point: promql.Point{T: 0, V: 11}, - }}, + }, + }, output: "", }, { @@ -137,7 +142,8 @@ func TestTemplateExpansion(t *testing.T) { { Metric: labels.FromStrings(labels.MetricName, "metric", "instance", "a"), Point: promql.Point{T: 0, V: 11}, - }}, + }, + }, output: "", html: true, }, @@ -151,7 +157,8 @@ func TestTemplateExpansion(t *testing.T) { }, { Metric: labels.FromStrings(labels.MetricName, 
"metric", "instance", "a"), Point: promql.Point{T: 0, V: 11}, - }}, + }, + }, output: "a:11: b:21: ", }, { diff --git a/tsdb/agent/db.go b/tsdb/agent/db.go index 40e192f5c..30d779355 100644 --- a/tsdb/agent/db.go +++ b/tsdb/agent/db.go @@ -36,9 +36,7 @@ import ( "go.uber.org/atomic" ) -var ( - ErrUnsupported = errors.New("unsupported operation with WAL-only storage") -) +var ErrUnsupported = errors.New("unsupported operation with WAL-only storage") // Default values for options. var ( diff --git a/tsdb/agent/series.go b/tsdb/agent/series.go index 71b3ca2e2..557f709bb 100644 --- a/tsdb/agent/series.go +++ b/tsdb/agent/series.go @@ -54,7 +54,7 @@ func (m seriesHashmap) Set(hash uint64, s *memSeries) { m[hash] = append(seriesSet, s) } -func (m seriesHashmap) Delete(hash uint64, ref uint64) { +func (m seriesHashmap) Delete(hash, ref uint64) { var rem []*memSeries for _, s := range m[hash] { if s.ref != ref { diff --git a/tsdb/block.go b/tsdb/block.go index 42a91ff59..88c0a30f5 100644 --- a/tsdb/block.go +++ b/tsdb/block.go @@ -191,9 +191,11 @@ type BlockMetaCompaction struct { Failed bool `json:"failed,omitempty"` } -const indexFilename = "index" -const metaFilename = "meta.json" -const metaVersion1 = 1 +const ( + indexFilename = "index" + metaFilename = "meta.json" + metaVersion1 = 1 +) func chunkDir(dir string) string { return filepath.Join(dir, "chunks") } @@ -611,12 +613,12 @@ func (pb *Block) CleanTombstones(dest string, c Compactor) (*ulid.ULID, bool, er // Snapshot creates snapshot of the block into dir. func (pb *Block) Snapshot(dir string) error { blockDir := filepath.Join(dir, pb.meta.ULID.String()) - if err := os.MkdirAll(blockDir, 0777); err != nil { + if err := os.MkdirAll(blockDir, 0o777); err != nil { return errors.Wrap(err, "create snapshot block dir") } chunksDir := chunkDir(blockDir) - if err := os.MkdirAll(chunksDir, 0777); err != nil { + if err := os.MkdirAll(chunksDir, 0o777); err != nil { return errors.Wrap(err, "create snapshot chunk dir") } diff --git a/tsdb/block_test.go b/tsdb/block_test.go index 54cfbc2c4..88ca3f1eb 100644 --- a/tsdb/block_test.go +++ b/tsdb/block_test.go @@ -185,7 +185,7 @@ func TestCorruptedChunk(t *testing.T) { require.NoError(t, err) require.Greater(t, len(files), 0, "No chunk created.") - f, err := os.OpenFile(files[0], os.O_RDWR, 0666) + f, err := os.OpenFile(files[0], os.O_RDWR, 0o666) require.NoError(t, err) // Apply corruption function. @@ -505,7 +505,7 @@ func createBlockFromHead(tb testing.TB, dir string, head *Head) string { compactor, err := NewLeveledCompactor(context.Background(), nil, log.NewNopLogger(), []int64{1000000}, nil, nil) require.NoError(tb, err) - require.NoError(tb, os.MkdirAll(dir, 0777)) + require.NoError(tb, os.MkdirAll(dir, 0o777)) // Add +1 millisecond to block maxt because block intervals are half-open: [b.MinTime, b.MaxTime). // Because of this block intervals are always +1 than the total samples it includes. 
diff --git a/tsdb/chunks/chunks.go b/tsdb/chunks/chunks.go index 11417c38c..067c9ebc1 100644 --- a/tsdb/chunks/chunks.go +++ b/tsdb/chunks/chunks.go @@ -97,9 +97,7 @@ func (cm *Meta) OverlapsClosedInterval(mint, maxt int64) bool { return cm.MinTime <= maxt && mint <= cm.MaxTime } -var ( - errInvalidSize = fmt.Errorf("invalid size") -) +var errInvalidSize = fmt.Errorf("invalid size") var castagnoliTable *crc32.Table @@ -148,7 +146,7 @@ func newWriter(dir string, segmentSize int64) (*Writer, error) { segmentSize = DefaultChunkSegmentSize } - if err := os.MkdirAll(dir, 0777); err != nil { + if err := os.MkdirAll(dir, 0o777); err != nil { return nil, err } dirFile, err := fileutil.OpenDir(dir) @@ -224,7 +222,7 @@ func cutSegmentFile(dirFile *os.File, magicNumber uint32, chunksFormat byte, all return 0, nil, 0, errors.Wrap(err, "next sequence file") } ptmp := p + ".tmp" - f, err := os.OpenFile(ptmp, os.O_WRONLY|os.O_CREATE, 0666) + f, err := os.OpenFile(ptmp, os.O_WRONLY|os.O_CREATE, 0o666) if err != nil { return 0, nil, 0, errors.Wrap(err, "open temp file") } @@ -266,7 +264,7 @@ func cutSegmentFile(dirFile *os.File, magicNumber uint32, chunksFormat byte, all return 0, nil, 0, errors.Wrap(err, "replace file") } - f, err = os.OpenFile(p, os.O_WRONLY, 0666) + f, err = os.OpenFile(p, os.O_WRONLY, 0o666) if err != nil { return 0, nil, 0, errors.Wrap(err, "open final file") } @@ -355,7 +353,7 @@ func (w *Writer) writeChunks(chks []Meta) error { return nil } - var seq = uint64(w.seq()) << 32 + seq := uint64(w.seq()) << 32 for i := range chks { chk := &chks[i] diff --git a/tsdb/chunks/head_chunks.go b/tsdb/chunks/head_chunks.go index f4ac61fcc..cc6c27be3 100644 --- a/tsdb/chunks/head_chunks.go +++ b/tsdb/chunks/head_chunks.go @@ -42,11 +42,9 @@ const ( headChunksFormatV1 = 1 ) -var ( - // ErrChunkDiskMapperClosed returned by any method indicates - // that the ChunkDiskMapper was closed. - ErrChunkDiskMapperClosed = errors.New("ChunkDiskMapper closed") -) +// ErrChunkDiskMapperClosed returned by any method indicates +// that the ChunkDiskMapper was closed. +var ErrChunkDiskMapperClosed = errors.New("ChunkDiskMapper closed") const ( // MintMaxtSize is the size of the mint/maxt for head chunk file and chunks. @@ -83,7 +81,6 @@ func (ref ChunkDiskMapperRef) Unpack() (sgmIndex, chkStart int) { sgmIndex = int(ref >> 32) chkStart = int((ref << 32) >> 32) return sgmIndex, chkStart - } // CorruptionErr is an error that's returned when corruption is encountered. @@ -152,7 +149,7 @@ func NewChunkDiskMapper(dir string, pool chunkenc.Pool, writeBufferSize int) (*C return nil, errors.Errorf("ChunkDiskMapper write buffer size should be a multiple of 1024 (actual: %d)", writeBufferSize) } - if err := os.MkdirAll(dir, 0777); err != nil { + if err := os.MkdirAll(dir, 0o777); err != nil { return nil, err } dirFile, err := fileutil.OpenDir(dir) diff --git a/tsdb/chunks/head_chunks_other.go b/tsdb/chunks/head_chunks_other.go index 8b37dd8c2..cd0e258a1 100644 --- a/tsdb/chunks/head_chunks_other.go +++ b/tsdb/chunks/head_chunks_other.go @@ -16,8 +16,6 @@ package chunks -var ( - // HeadChunkFilePreallocationSize is the size to which the m-map file should be preallocated when a new file is cut. - // Windows needs pre-allocations while the other OS does not. - HeadChunkFilePreallocationSize int64 -) +// HeadChunkFilePreallocationSize is the size to which the m-map file should be preallocated when a new file is cut. +// Windows needs pre-allocations while the other OS does not. 
+var HeadChunkFilePreallocationSize int64 diff --git a/tsdb/chunks/head_chunks_test.go b/tsdb/chunks/head_chunks_test.go index f1aa13cec..873a00f67 100644 --- a/tsdb/chunks/head_chunks_test.go +++ b/tsdb/chunks/head_chunks_test.go @@ -379,7 +379,7 @@ func TestHeadReadWriter_ReadRepairOnEmptyLastFile(t *testing.T) { // Write an empty last file mimicking an abrupt shutdown on file creation. emptyFileName := segmentFile(dir, lastFile+1) - f, err := os.OpenFile(emptyFileName, os.O_WRONLY|os.O_CREATE, 0666) + f, err := os.OpenFile(emptyFileName, os.O_WRONLY|os.O_CREATE, 0o666) require.NoError(t, err) require.NoError(t, f.Sync()) stat, err := f.Stat() @@ -409,7 +409,6 @@ func TestHeadReadWriter_ReadRepairOnEmptyLastFile(t *testing.T) { require.NoError(t, err) require.LessOrEqual(t, seq, uint64(lastFile), "file index on disk is bigger than previous last file") } - } func testChunkDiskMapper(t *testing.T) *ChunkDiskMapper { diff --git a/tsdb/chunks/head_chunks_windows.go b/tsdb/chunks/head_chunks_windows.go index b772b64b4..214ee42f5 100644 --- a/tsdb/chunks/head_chunks_windows.go +++ b/tsdb/chunks/head_chunks_windows.go @@ -13,8 +13,6 @@ package chunks -var ( - // HeadChunkFilePreallocationSize is the size to which the m-map file should be preallocated when a new file is cut. - // Windows needs pre-allocation to m-map the file. - HeadChunkFilePreallocationSize int64 = MaxHeadChunkFileSize -) +// HeadChunkFilePreallocationSize is the size to which the m-map file should be preallocated when a new file is cut. +// Windows needs pre-allocation to m-map the file. +var HeadChunkFilePreallocationSize int64 = MaxHeadChunkFileSize diff --git a/tsdb/compact.go b/tsdb/compact.go index b2ae7e4ea..ce197f09a 100644 --- a/tsdb/compact.go +++ b/tsdb/compact.go @@ -564,7 +564,7 @@ func (c *LeveledCompactor) write(dest string, meta *BlockMeta, blocks ...BlockRe return err } - if err = os.MkdirAll(tmp, 0777); err != nil { + if err = os.MkdirAll(tmp, 0o777); err != nil { return err } diff --git a/tsdb/db.go b/tsdb/db.go index c69f6f464..7326b904b 100644 --- a/tsdb/db.go +++ b/tsdb/db.go @@ -65,10 +65,8 @@ const ( lockfileCreatedCleanly = 1 ) -var ( - // ErrNotReady is returned if the underlying storage is not ready yet. - ErrNotReady = errors.New("TSDB not ready") -) +// ErrNotReady is returned if the underlying storage is not ready yet. +var ErrNotReady = errors.New("TSDB not ready") // DefaultOptions used for the DB. They are sane for setups using // millisecond precision timestamps. 
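The tsdb/db.go and tsdb/chunks/head_chunks_windows.go hunks above show the inverse rule: a parenthesized var block holding only one declaration is flattened, and its doc comment moves onto the declaration itself. A standalone sketch reusing the ErrNotReady declaration from the hunk:

package main

import (
	"errors"
	"fmt"
)

// ErrNotReady is returned if the underlying storage is not ready yet.
var ErrNotReady = errors.New("TSDB not ready")

// Before the rewrite, the same declaration was wrapped in parentheses:
//
//	var (
//		// ErrNotReady is returned if the underlying storage is not ready yet.
//		ErrNotReady = errors.New("TSDB not ready")
//	)

func main() {
	fmt.Println(ErrNotReady)
}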
@@ -609,7 +607,7 @@ func validateOpts(opts *Options, rngs []int64) (*Options, []int64) { } func open(dir string, l log.Logger, r prometheus.Registerer, opts *Options, rngs []int64, stats *DBStats) (_ *DB, returnedErr error) { - if err := os.MkdirAll(dir, 0777); err != nil { + if err := os.MkdirAll(dir, 0o777); err != nil { return nil, err } if l == nil { @@ -1642,7 +1640,7 @@ func (db *DB) ExemplarQuerier(ctx context.Context) (storage.ExemplarQuerier, err return db.head.exemplars.ExemplarQuerier(ctx) } -func rangeForTimestamp(t int64, width int64) (maxt int64) { +func rangeForTimestamp(t, width int64) (maxt int64) { return (t/width)*width + width } diff --git a/tsdb/db_test.go b/tsdb/db_test.go index 88342c8f8..3f657152f 100644 --- a/tsdb/db_test.go +++ b/tsdb/db_test.go @@ -228,7 +228,7 @@ func TestNoPanicAfterWALCorruption(t *testing.T) { { walFiles, err := ioutil.ReadDir(path.Join(db.Dir(), "wal")) require.NoError(t, err) - f, err := os.OpenFile(path.Join(db.Dir(), "wal", walFiles[0].Name()), os.O_RDWR, 0666) + f, err := os.OpenFile(path.Join(db.Dir(), "wal", walFiles[0].Name()), os.O_RDWR, 0o666) require.NoError(t, err) r := wal.NewReader(bufio.NewReader(f)) require.True(t, r.Next(), "reading the series record") @@ -1245,7 +1245,6 @@ func TestTombstoneCleanRetentionLimitsRace(t *testing.T) { require.NoError(t, db.Close()) } - } func intersection(oldBlocks, actualBlocks []string) (intersection []string) { @@ -1272,6 +1271,7 @@ type mockCompactorFailing struct { func (*mockCompactorFailing) Plan(dir string) ([]string, error) { return nil, nil } + func (c *mockCompactorFailing) Write(dest string, b BlockReader, mint, maxt int64, parent *BlockMeta) (ulid.ULID, error) { if len(c.blocks) >= c.max { return ulid.ULID{}, fmt.Errorf("the compactor already did the maximum allowed blocks so it is time to fail") @@ -1559,7 +1559,7 @@ func expandSeriesSet(ss storage.SeriesSet) ([]labels.Labels, map[string][]sample func TestOverlappingBlocksDetectsAllOverlaps(t *testing.T) { // Create 10 blocks that does not overlap (0-10, 10-20, ..., 100-110) but in reverse order to ensure our algorithm // will handle that. - var metas = make([]BlockMeta, 11) + metas := make([]BlockMeta, 11) for i := 10; i >= 0; i-- { metas[i] = BlockMeta{MinTime: int64(i * 10), MaxTime: int64((i + 1) * 10)} } @@ -1781,7 +1781,7 @@ func TestInitializeHeadTimestamp(t *testing.T) { require.NoError(t, os.RemoveAll(dir)) }() - require.NoError(t, os.MkdirAll(path.Join(dir, "wal"), 0777)) + require.NoError(t, os.MkdirAll(path.Join(dir, "wal"), 0o777)) w, err := wal.New(nil, nil, path.Join(dir, "wal"), false) require.NoError(t, err) @@ -1831,7 +1831,7 @@ func TestInitializeHeadTimestamp(t *testing.T) { createBlock(t, dir, genSeries(1, 1, 1000, 6000)) - require.NoError(t, os.MkdirAll(path.Join(dir, "wal"), 0777)) + require.NoError(t, os.MkdirAll(path.Join(dir, "wal"), 0o777)) w, err := wal.New(nil, nil, path.Join(dir, "wal"), false) require.NoError(t, err) @@ -2663,7 +2663,6 @@ func TestChunkWriter_ReadAfterWrite(t *testing.T) { for i, test := range tests { t.Run(strconv.Itoa(i), func(t *testing.T) { - tempDir, err := ioutil.TempDir("", "test_chunk_writer") require.NoError(t, err) defer func() { require.NoError(t, os.RemoveAll(tempDir)) }() @@ -2899,7 +2898,7 @@ func TestOpen_VariousBlockStates(t *testing.T) { expectedLoadedDirs[outDir] = struct{}{} // Touch chunks dir in block. 
- require.NoError(t, os.MkdirAll(filepath.Join(dbDir, "chunks"), 0777)) + require.NoError(t, os.MkdirAll(filepath.Join(dbDir, "chunks"), 0o777)) defer func() { require.NoError(t, os.RemoveAll(filepath.Join(dbDir, "chunks"))) }() @@ -3166,7 +3165,7 @@ func TestLockfileMetric(t *testing.T) { // Test preconditions (file already exists + lockfile option) lockfilePath := filepath.Join(absdir, "lock") if c.fileAlreadyExists { - err = ioutil.WriteFile(lockfilePath, []byte{}, 0644) + err = ioutil.WriteFile(lockfilePath, []byte{}, 0o644) require.NoError(t, err) } opts := DefaultOptions() diff --git a/tsdb/encoding/encoding.go b/tsdb/encoding/encoding.go index 8a94ff7ba..c2da86afa 100644 --- a/tsdb/encoding/encoding.go +++ b/tsdb/encoding/encoding.go @@ -133,7 +133,6 @@ func NewDecbufAt(bs ByteSlice, off int, castagnoliTable *crc32.Table) Decbuf { dec := Decbuf{B: b[:len(b)-4]} if castagnoliTable != nil { - if exp := binary.BigEndian.Uint32(b[len(b)-4:]); dec.Crc32(castagnoliTable) != exp { return Decbuf{E: ErrInvalidChecksum} } diff --git a/tsdb/exemplar.go b/tsdb/exemplar.go index 086e69667..63ac512c9 100644 --- a/tsdb/exemplar.go +++ b/tsdb/exemplar.go @@ -284,7 +284,7 @@ func (ce *CircularExemplarStorage) Resize(l int64) int { // This math is essentially looking at nextIndex, where we would write the next exemplar to, // and find the index in the old exemplar buffer that we should start migrating exemplars from. // This way we don't migrate exemplars that would just be overwritten when migrating later exemplars. - var startIndex = (oldNextIndex - count + int64(len(oldBuffer))) % int64(len(oldBuffer)) + startIndex := (oldNextIndex - count + int64(len(oldBuffer))) % int64(len(oldBuffer)) for i := int64(0); i < count; i++ { idx := (startIndex + i) % int64(len(oldBuffer)) diff --git a/tsdb/exemplar_test.go b/tsdb/exemplar_test.go index eb95daa34..db47780fd 100644 --- a/tsdb/exemplar_test.go +++ b/tsdb/exemplar_test.go @@ -448,7 +448,6 @@ func TestResize(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - exs, err := NewCircularExemplarStorage(tc.startSize, eMetrics) require.NoError(t, err) es := exs.(*CircularExemplarStorage) @@ -456,7 +455,8 @@ func TestResize(t *testing.T) { for i := 0; int64(i) < tc.startSize; i++ { err = es.AddExemplar(labels.FromStrings("service", strconv.Itoa(i)), exemplar.Exemplar{ Value: float64(i), - Ts: int64(i)}) + Ts: int64(i), + }) require.NoError(t, err) } diff --git a/tsdb/fileutil/fileutil.go b/tsdb/fileutil/fileutil.go index 927ebe004..8ab8ce3dd 100644 --- a/tsdb/fileutil/fileutil.go +++ b/tsdb/fileutil/fileutil.go @@ -27,7 +27,7 @@ import ( // CopyDirs copies all directories, subdirectories and files recursively including the empty folders. // Source and destination must be full paths. func CopyDirs(src, dest string) error { - if err := os.MkdirAll(dest, 0777); err != nil { + if err := os.MkdirAll(dest, 0o777); err != nil { return err } files, err := readDirs(src) @@ -46,7 +46,7 @@ func CopyDirs(src, dest string) error { // Empty directories are also created. 
if stat.IsDir() { - if err := os.MkdirAll(dp, 0777); err != nil { + if err := os.MkdirAll(dp, 0o777); err != nil { return err } continue @@ -65,7 +65,7 @@ func copyFile(src, dest string) error { return err } - err = ioutil.WriteFile(dest, data, 0666) + err = ioutil.WriteFile(dest, data, 0o666) if err != nil { return err } diff --git a/tsdb/fileutil/flock.go b/tsdb/fileutil/flock.go index d5eaa7ca2..e0082e2f2 100644 --- a/tsdb/fileutil/flock.go +++ b/tsdb/fileutil/flock.go @@ -29,7 +29,7 @@ type Releaser interface { // locking has failed. Neither this function nor the returned Releaser is // goroutine-safe. func Flock(fileName string) (r Releaser, existed bool, err error) { - if err = os.MkdirAll(filepath.Dir(fileName), 0755); err != nil { + if err = os.MkdirAll(filepath.Dir(fileName), 0o755); err != nil { return nil, false, err } diff --git a/tsdb/fileutil/flock_plan9.go b/tsdb/fileutil/flock_plan9.go index 71ed67e8c..3b9550e7f 100644 --- a/tsdb/fileutil/flock_plan9.go +++ b/tsdb/fileutil/flock_plan9.go @@ -24,7 +24,7 @@ func (l *plan9Lock) Release() error { } func newLock(fileName string) (Releaser, error) { - f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE, os.ModeExclusive|0666) + f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE, os.ModeExclusive|0o666) if err != nil { return nil, err } diff --git a/tsdb/fileutil/flock_solaris.go b/tsdb/fileutil/flock_solaris.go index f19c184a4..21be384d3 100644 --- a/tsdb/fileutil/flock_solaris.go +++ b/tsdb/fileutil/flock_solaris.go @@ -46,7 +46,7 @@ func (l *unixLock) set(lock bool) error { } func newLock(fileName string) (Releaser, error) { - f, err := os.OpenFile(fileName, os.O_RDWR|os.O_CREATE, 0666) + f, err := os.OpenFile(fileName, os.O_RDWR|os.O_CREATE, 0o666) if err != nil { return nil, err } diff --git a/tsdb/fileutil/flock_unix.go b/tsdb/fileutil/flock_unix.go index c0aeb6948..9637f073b 100644 --- a/tsdb/fileutil/flock_unix.go +++ b/tsdb/fileutil/flock_unix.go @@ -41,7 +41,7 @@ func (l *unixLock) set(lock bool) error { } func newLock(fileName string) (Releaser, error) { - f, err := os.OpenFile(fileName, os.O_RDWR|os.O_CREATE, 0666) + f, err := os.OpenFile(fileName, os.O_RDWR|os.O_CREATE, 0o666) if err != nil { return nil, err } diff --git a/tsdb/head.go b/tsdb/head.go index 80edf794e..8bfbb1174 100644 --- a/tsdb/head.go +++ b/tsdb/head.go @@ -850,7 +850,7 @@ func (h *Head) WaitForPendingReadersInTimeRange(mint, maxt int64) { // new range head and the new querier. This methods helps preventing races with the truncation of in-memory data. // // NOTE: The querier should already be taken before calling this. -func (h *Head) IsQuerierCollidingWithTruncation(querierMint, querierMaxt int64) (shouldClose bool, getNew bool, newMint int64) { +func (h *Head) IsQuerierCollidingWithTruncation(querierMint, querierMaxt int64) (shouldClose, getNew bool, newMint int64) { if !h.memTruncationInProcess.Load() { return false, false, 0 } @@ -1197,7 +1197,6 @@ func (h *Head) Close() error { errs.Add(h.performChunkSnapshot()) } return errs.Err() - } // String returns an human readable representation of the TSDB head. 
It's important to diff --git a/tsdb/head_test.go b/tsdb/head_test.go index 101847640..8ace65355 100644 --- a/tsdb/head_test.go +++ b/tsdb/head_test.go @@ -842,7 +842,6 @@ func TestDeletedSamplesAndSeriesStillInWALAfterCheckpoint(t *testing.T) { require.Equal(t, 1, series) require.Equal(t, 9999, samples) require.Equal(t, 1, stones) - } func TestDelete_e2e(t *testing.T) { @@ -1472,7 +1471,7 @@ func TestHeadReadWriterRepair(t *testing.T) { require.Equal(t, 7, len(files)) // Corrupt the 4th file by writing a random byte to series ref. - f, err := os.OpenFile(filepath.Join(mmappedChunksDir(dir), files[3].Name()), os.O_WRONLY, 0666) + f, err := os.OpenFile(filepath.Join(mmappedChunksDir(dir), files[3].Name()), os.O_WRONLY, 0o666) require.NoError(t, err) n, err := f.WriteAt([]byte{67, 88}, chunks.HeadChunkFileHeaderSize+2) require.NoError(t, err) @@ -1946,7 +1945,8 @@ func TestHeadLabelNamesValuesWithMinMaxRange(t *testing.T) { lastSeriesTimestamp int64 = 300 ) var ( - seriesTimestamps = []int64{firstSeriesTimestamp, + seriesTimestamps = []int64{ + firstSeriesTimestamp, secondSeriesTimestamp, lastSeriesTimestamp, } @@ -1963,7 +1963,7 @@ func TestHeadLabelNamesValuesWithMinMaxRange(t *testing.T) { require.Equal(t, head.MinTime(), firstSeriesTimestamp) require.Equal(t, head.MaxTime(), lastSeriesTimestamp) - var testCases = []struct { + testCases := []struct { name string mint int64 maxt int64 @@ -2007,7 +2007,7 @@ func TestHeadLabelValuesWithMatchers(t *testing.T) { } require.NoError(t, app.Commit()) - var testCases = []struct { + testCases := []struct { name string labelName string matchers []*labels.Matcher @@ -2765,7 +2765,6 @@ func TestChunkSnapshot(t *testing.T) { require.Equal(t, 0.0, prom_testutil.ToFloat64(head.metrics.snapshotReplayErrorTotal)) } - } func TestSnapshotError(t *testing.T) { diff --git a/tsdb/head_wal.go b/tsdb/head_wal.go index 478829824..3f6416e17 100644 --- a/tsdb/head_wal.go +++ b/tsdb/head_wal.go @@ -15,11 +15,6 @@ package tsdb import ( "fmt" - "github.com/prometheus/prometheus/pkg/labels" - "github.com/prometheus/prometheus/tsdb/chunkenc" - "github.com/prometheus/prometheus/tsdb/encoding" - tsdb_errors "github.com/prometheus/prometheus/tsdb/errors" - "github.com/prometheus/prometheus/tsdb/fileutil" "io/ioutil" "math" "os" @@ -30,6 +25,12 @@ import ( "sync" "time" + "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/tsdb/chunkenc" + "github.com/prometheus/prometheus/tsdb/encoding" + tsdb_errors "github.com/prometheus/prometheus/tsdb/errors" + "github.com/prometheus/prometheus/tsdb/fileutil" + "github.com/go-kit/log/level" "github.com/pkg/errors" "go.uber.org/atomic" @@ -585,7 +586,7 @@ func (h *Head) ChunkSnapshot() (*ChunkSnapshotStats, error) { cpdirtmp := cpdir + ".tmp" stats.Dir = cpdir - if err := os.MkdirAll(cpdirtmp, 0777); err != nil { + if err := os.MkdirAll(cpdirtmp, 0o777); err != nil { return stats, errors.Wrap(err, "create chunk snapshot dir") } cp, err := wal.New(nil, nil, cpdirtmp, h.wal.CompressionEnabled()) diff --git a/tsdb/index/index.go b/tsdb/index/index.go index d6f5d6b88..bfe630a7a 100644 --- a/tsdb/index/index.go +++ b/tsdb/index/index.go @@ -262,7 +262,7 @@ type FileWriter struct { } func NewFileWriter(name string) (*FileWriter, error) { - f, err := os.OpenFile(name, os.O_CREATE|os.O_RDWR, 0666) + f, err := os.OpenFile(name, os.O_CREATE|os.O_RDWR, 0o666) if err != nil { return nil, err } @@ -903,7 +903,6 @@ func (w *Writer) writePostingsToTmpFiles() error { values := make([]uint32, 0, len(postings[sid])) for v := 
range postings[sid] { values = append(values, v) - } // Symbol numbers are in order, so the strings will also be in order. sort.Sort(uint32slice(values)) @@ -1265,7 +1264,7 @@ type Symbols struct { const symbolFactor = 32 // NewSymbols returns a Symbols object for symbol lookups. -func NewSymbols(bs ByteSlice, version int, off int) (*Symbols, error) { +func NewSymbols(bs ByteSlice, version, off int) (*Symbols, error) { s := &Symbols{ bs: bs, version: version, @@ -1504,7 +1503,7 @@ func (r *Reader) LabelValues(name string, matchers ...*labels.Matcher) ([]string } else { d.Skip(skip) } - s := yoloString(d.UvarintBytes()) //Label value. + s := yoloString(d.UvarintBytes()) // Label value. values = append(values, s) if s == lastVal { break diff --git a/tsdb/index/index_test.go b/tsdb/index/index_test.go index 0e75d3758..8fdc63069 100644 --- a/tsdb/index/index_test.go +++ b/tsdb/index/index_test.go @@ -154,7 +154,7 @@ func TestIndexRW_Create_Open(t *testing.T) { require.NoError(t, ir.Close()) // Modify magic header must cause open to fail. - f, err := os.OpenFile(fn, os.O_WRONLY, 0666) + f, err := os.OpenFile(fn, os.O_WRONLY, 0o666) require.NoError(t, err) _, err = f.WriteAt([]byte{0, 0}, 0) require.NoError(t, err) @@ -340,7 +340,6 @@ func TestPostingsMany(t *testing.T) { } require.Equal(t, exp, got, fmt.Sprintf("input: %v", c.in)) } - } func TestPersistence_index_e2e(t *testing.T) { @@ -504,7 +503,7 @@ func TestNewFileReaderErrorNoOpenFiles(t *testing.T) { dir := testutil.NewTemporaryDirectory("block", t) idxName := filepath.Join(dir.Path(), "index") - err := ioutil.WriteFile(idxName, []byte("corrupted contents"), 0666) + err := ioutil.WriteFile(idxName, []byte("corrupted contents"), 0o666) require.NoError(t, err) _, err = NewFileReader(idxName) diff --git a/tsdb/index/postings_test.go b/tsdb/index/postings_test.go index bbf5332a5..4ec85aee4 100644 --- a/tsdb/index/postings_test.go +++ b/tsdb/index/postings_test.go @@ -67,7 +67,7 @@ func TestIntersect(t *testing.T) { a := newListPostings(1, 2, 3) b := newListPostings(2, 3, 4) - var cases = []struct { + cases := []struct { in []Postings res Postings @@ -182,7 +182,7 @@ func TestIntersect(t *testing.T) { } func TestMultiIntersect(t *testing.T) { - var cases = []struct { + cases := []struct { p [][]uint64 res []uint64 }{ @@ -320,7 +320,7 @@ func TestMultiMerge(t *testing.T) { } func TestMergedPostings(t *testing.T) { - var cases = []struct { + cases := []struct { in []Postings res Postings @@ -424,7 +424,7 @@ func TestMergedPostings(t *testing.T) { } func TestMergedPostingsSeek(t *testing.T) { - var cases = []struct { + cases := []struct { a, b []uint64 seek uint64 @@ -486,7 +486,7 @@ func TestMergedPostingsSeek(t *testing.T) { } func TestRemovedPostings(t *testing.T) { - var cases = []struct { + cases := []struct { a, b []uint64 res []uint64 }{ @@ -535,7 +535,6 @@ func TestRemovedPostings(t *testing.T) { require.NoError(t, err) require.Equal(t, c.res, res) } - } func TestRemovedNextStackoverflow(t *testing.T) { @@ -561,7 +560,7 @@ func TestRemovedNextStackoverflow(t *testing.T) { } func TestRemovedPostingsSeek(t *testing.T) { - var cases = []struct { + cases := []struct { a, b []uint64 seek uint64 @@ -738,7 +737,7 @@ func TestIntersectWithMerge(t *testing.T) { } func TestWithoutPostings(t *testing.T) { - var cases = []struct { + cases := []struct { base Postings drop Postings @@ -826,7 +825,6 @@ func BenchmarkPostings_Stats(b *testing.B) { p.Add(seriesID, labels.FromStrings(name, value)) seriesID++ } - } createPostingsLabelValues("__name__", 
"metrics_name_can_be_very_big_and_bad", 1e3) for i := 0; i < 20; i++ { @@ -845,7 +843,6 @@ func BenchmarkPostings_Stats(b *testing.B) { for n := 0; n < b.N; n++ { p.Stats("__name__") } - } func TestMemPostings_Delete(t *testing.T) { diff --git a/tsdb/index/postingsstats.go b/tsdb/index/postingsstats.go index 2fc2465d9..5e5880720 100644 --- a/tsdb/index/postingsstats.go +++ b/tsdb/index/postingsstats.go @@ -59,7 +59,6 @@ func (m *maxHeap) push(item Stat) { m.minIndex = i } } - } func (m *maxHeap) get() []Stat { diff --git a/tsdb/index/postingsstats_test.go b/tsdb/index/postingsstats_test.go index 910b5a06a..7ce51c795 100644 --- a/tsdb/index/postingsstats_test.go +++ b/tsdb/index/postingsstats_test.go @@ -37,7 +37,6 @@ func TestPostingsStats(t *testing.T) { for i := 0; i < heapLength; i++ { require.Equal(t, uint64(max-i), data[i].Count) } - } func TestPostingsStats2(t *testing.T) { @@ -55,6 +54,7 @@ func TestPostingsStats2(t *testing.T) { require.Equal(t, 4, len(data)) require.Equal(t, uint64(11), data[0].Count) } + func BenchmarkPostingStatsMaxHep(b *testing.B) { stats := &maxHeap{} max := 9000000 @@ -71,5 +71,4 @@ func BenchmarkPostingStatsMaxHep(b *testing.B) { } stats.get() } - } diff --git a/tsdb/querier.go b/tsdb/querier.go index 18a1fd20a..a0bf762c1 100644 --- a/tsdb/querier.go +++ b/tsdb/querier.go @@ -606,6 +606,7 @@ func (p *populateWithDelGenericSeriesIterator) Err() error { return p.err } func (p *populateWithDelGenericSeriesIterator) toSeriesIterator() chunkenc.Iterator { return &populateWithDelSeriesIterator{populateWithDelGenericSeriesIterator: p} } + func (p *populateWithDelGenericSeriesIterator) toChunkSeriesIterator() chunks.Iterator { return &populateWithDelChunkSeriesIterator{populateWithDelGenericSeriesIterator: p} } @@ -779,7 +780,7 @@ func (b *blockChunkSeriesSet) At() storage.ChunkSeries { } // NewMergedStringIter returns string iterator that allows to merge symbols on demand and stream result. -func NewMergedStringIter(a index.StringIter, b index.StringIter) index.StringIter { +func NewMergedStringIter(a, b index.StringIter) index.StringIter { return &mergedStringIter{a: a, b: b, aok: a.Next(), bok: b.Next()} } @@ -875,7 +876,6 @@ Outer: if ts <= tr.Maxt { return true - } it.Intervals = it.Intervals[1:] } diff --git a/tsdb/querier_test.go b/tsdb/querier_test.go index 514b1f5b9..c90aadaf3 100644 --- a/tsdb/querier_test.go +++ b/tsdb/querier_test.go @@ -918,7 +918,7 @@ func TestPopulateWithDelSeriesIterator_NextWithMinTime(t *testing.T) { // The subset are all equivalent so this does not capture merging of partial or non-overlapping sets well. // TODO(bwplotka): Merge with storage merged series set benchmark. func BenchmarkMergedSeriesSet(b *testing.B) { - var sel = func(sets []storage.SeriesSet) storage.SeriesSet { + sel := func(sets []storage.SeriesSet) storage.SeriesSet { return storage.NewMergeSeriesSet(sets, storage.ChainedSeriesMerge) } @@ -1860,7 +1860,6 @@ func TestPostingsForMatchers(t *testing.T) { t.Errorf("Evaluating %v, missing results %+v", c.matchers, exp) } } - } // TestClose ensures that calling Close more than once doesn't block and doesn't panic. 
@@ -2116,7 +2115,12 @@ func TestBlockBaseSeriesSet(t *testing.T) { { lset: labels.New([]labels.Label{{Name: "a", Value: "a"}}...), chunks: []chunks.Meta{ - {Ref: 29}, {Ref: 45}, {Ref: 245}, {Ref: 123}, {Ref: 4232}, {Ref: 5344}, + {Ref: 29}, + {Ref: 45}, + {Ref: 245}, + {Ref: 123}, + {Ref: 4232}, + {Ref: 5344}, {Ref: 121}, }, ref: 12, diff --git a/tsdb/record/record.go b/tsdb/record/record.go index 3bfb1be93..192c29ce7 100644 --- a/tsdb/record/record.go +++ b/tsdb/record/record.go @@ -41,10 +41,8 @@ const ( Exemplars Type = 4 ) -var ( - // ErrNotFound is returned if a looked up resource was not found. Duplicate ErrNotFound from head.go. - ErrNotFound = errors.New("not found") -) +// ErrNotFound is returned if a looked up resource was not found. Duplicate ErrNotFound from head.go. +var ErrNotFound = errors.New("not found") // RefSeries is the series labels with the series ID. type RefSeries struct { @@ -69,8 +67,7 @@ type RefExemplar struct { // Decoder decodes series, sample, and tombstone records. // The zero value is ready to use. -type Decoder struct { -} +type Decoder struct{} // Type returns the type of the record. // Returns RecordUnknown if no valid record type is found. @@ -225,8 +222,7 @@ func (d *Decoder) ExemplarsFromBuffer(dec *encoding.Decbuf, exemplars []RefExemp // Encoder encodes series, sample, and tombstones records. // The zero value is ready to use. -type Encoder struct { -} +type Encoder struct{} // Series appends the encoded series to b and returns the resulting slice. func (e *Encoder) Series(series []RefSeries, b []byte) []byte { diff --git a/tsdb/repair_test.go b/tsdb/repair_test.go index 7fb2720fd..dc0fa598e 100644 --- a/tsdb/repair_test.go +++ b/tsdb/repair_test.go @@ -78,7 +78,7 @@ func TestRepairBadIndexVersion(t *testing.T) { require.Error(t, err) // Touch chunks dir in block to imitate them. - require.NoError(t, os.MkdirAll(filepath.Join(tmpDbDir, "chunks"), 0777)) + require.NoError(t, os.MkdirAll(filepath.Join(tmpDbDir, "chunks"), 0o777)) // Read current index to check integrity. 
r, err := index.NewFileReader(filepath.Join(tmpDbDir, indexFilename)) diff --git a/tsdb/test/hash_test.go b/tsdb/test/hash_test.go index 1778f0f86..1242f5db5 100644 --- a/tsdb/test/hash_test.go +++ b/tsdb/test/hash_test.go @@ -81,7 +81,6 @@ func fnv64a(b []byte) uint64 { } func BenchmarkCRC32_diff(b *testing.B) { - data := [][]byte{} for i := 0; i < 1000; i++ { diff --git a/tsdb/tombstones/tombstones_test.go b/tsdb/tombstones/tombstones_test.go index fa1825d17..aee8568eb 100644 --- a/tsdb/tombstones/tombstones_test.go +++ b/tsdb/tombstones/tombstones_test.go @@ -77,7 +77,7 @@ func TestDeletingTombstones(t *testing.T) { require.NoError(t, err) require.Equal(t, intervals, dranges) - stones.DeleteTombstones(map[uint64]struct{}{ref: struct{}{}}) + stones.DeleteTombstones(map[uint64]struct{}{ref: {}}) intervals, err = stones.Get(ref) require.NoError(t, err) diff --git a/tsdb/tsdbutil/buffer.go b/tsdb/tsdbutil/buffer.go index a24d50472..3e136bb1d 100644 --- a/tsdb/tsdbutil/buffer.go +++ b/tsdb/tsdbutil/buffer.go @@ -210,7 +210,7 @@ func (r *sampleRing) last() (int64, float64, bool) { func (r *sampleRing) samples() []sample { res := make([]sample, r.l) - var k = r.f + r.l + k := r.f + r.l var j int if k > len(r.buf) { k = len(r.buf) diff --git a/tsdb/tsdbutil/chunks.go b/tsdb/tsdbutil/chunks.go index 5ae58b0a8..ffe9c05e0 100644 --- a/tsdb/tsdbutil/chunks.go +++ b/tsdb/tsdbutil/chunks.go @@ -67,7 +67,7 @@ func PopulatedChunk(numSamples int, minTime int64) chunks.Meta { } // GenerateSamples starting at start and counting up numSamples. -func GenerateSamples(start int, numSamples int) []Sample { +func GenerateSamples(start, numSamples int) []Sample { samples := make([]Sample, 0, numSamples) for i := start; i < start+numSamples; i++ { samples = append(samples, sample{ diff --git a/tsdb/wal.go b/tsdb/wal.go index 71fc0a44c..281044a95 100644 --- a/tsdb/wal.go +++ b/tsdb/wal.go @@ -171,7 +171,7 @@ type SegmentWAL struct { // OpenSegmentWAL opens or creates a write ahead log in the given directory. // The WAL must be read completely before new data is written. func OpenSegmentWAL(dir string, logger log.Logger, flushInterval time.Duration, r prometheus.Registerer) (*SegmentWAL, error) { - if err := os.MkdirAll(dir, 0777); err != nil { + if err := os.MkdirAll(dir, 0o777); err != nil { return nil, err } df, err := fileutil.OpenDir(dir) @@ -505,7 +505,7 @@ func (w *SegmentWAL) LogDeletes(stones []tombstones.Stone) error { func (w *SegmentWAL) openSegmentFile(name string) (*os.File, error) { // We must open all files in read/write mode as we may have to truncate along // the way and any file may become the head. 
- f, err := os.OpenFile(name, os.O_RDWR, 0666) + f, err := os.OpenFile(name, os.O_RDWR, 0o666) if err != nil { return nil, err } diff --git a/tsdb/wal/checkpoint.go b/tsdb/wal/checkpoint.go index eb0d27034..80c0c1036 100644 --- a/tsdb/wal/checkpoint.go +++ b/tsdb/wal/checkpoint.go @@ -129,7 +129,7 @@ func Checkpoint(logger log.Logger, w *WAL, from, to int, keep func(id uint64) bo return nil, errors.Wrap(err, "remove previous temporary checkpoint dir") } - if err := os.MkdirAll(cpdirtmp, 0777); err != nil { + if err := os.MkdirAll(cpdirtmp, 0o777); err != nil { return nil, errors.Wrap(err, "create checkpoint dir") } cp, err := New(nil, nil, cpdirtmp, w.CompressionEnabled()) diff --git a/tsdb/wal/checkpoint_test.go b/tsdb/wal/checkpoint_test.go index 9df533df1..2a5ff736a 100644 --- a/tsdb/wal/checkpoint_test.go +++ b/tsdb/wal/checkpoint_test.go @@ -36,37 +36,37 @@ func TestLastCheckpoint(t *testing.T) { _, _, err := LastCheckpoint(dir) require.Equal(t, record.ErrNotFound, err) - require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.0000"), 0777)) + require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.0000"), 0o777)) s, k, err := LastCheckpoint(dir) require.NoError(t, err) require.Equal(t, filepath.Join(dir, "checkpoint.0000"), s) require.Equal(t, 0, k) - require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.xyz"), 0777)) + require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.xyz"), 0o777)) s, k, err = LastCheckpoint(dir) require.NoError(t, err) require.Equal(t, filepath.Join(dir, "checkpoint.0000"), s) require.Equal(t, 0, k) - require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.1"), 0777)) + require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.1"), 0o777)) s, k, err = LastCheckpoint(dir) require.NoError(t, err) require.Equal(t, filepath.Join(dir, "checkpoint.1"), s) require.Equal(t, 1, k) - require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.1000"), 0777)) + require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.1000"), 0o777)) s, k, err = LastCheckpoint(dir) require.NoError(t, err) require.Equal(t, filepath.Join(dir, "checkpoint.1000"), s) require.Equal(t, 1000, k) - require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.99999999"), 0777)) + require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.99999999"), 0o777)) s, k, err = LastCheckpoint(dir) require.NoError(t, err) require.Equal(t, filepath.Join(dir, "checkpoint.99999999"), s) require.Equal(t, 99999999, k) - require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.100000000"), 0777)) + require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.100000000"), 0o777)) s, k, err = LastCheckpoint(dir) require.NoError(t, err) require.Equal(t, filepath.Join(dir, "checkpoint.100000000"), s) @@ -78,10 +78,10 @@ func TestDeleteCheckpoints(t *testing.T) { require.NoError(t, DeleteCheckpoints(dir, 0)) - require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.00"), 0777)) - require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.01"), 0777)) - require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.02"), 0777)) - require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.03"), 0777)) + require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.00"), 0o777)) + require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.01"), 0o777)) + require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.02"), 0o777)) + require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.03"), 0o777)) require.NoError(t, DeleteCheckpoints(dir, 2)) @@ 
-93,9 +93,9 @@ func TestDeleteCheckpoints(t *testing.T) { } require.Equal(t, []string{"checkpoint.02", "checkpoint.03"}, fns) - require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.99999999"), 0777)) - require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.100000000"), 0777)) - require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.100000001"), 0777)) + require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.99999999"), 0o777)) + require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.100000000"), 0o777)) + require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.100000001"), 0o777)) require.NoError(t, DeleteCheckpoints(dir, 100000000)) @@ -233,11 +233,12 @@ func TestCheckpointNoTmpFolderAfterError(t *testing.T) { require.NoError(t, err) var enc record.Encoder require.NoError(t, w.Log(enc.Series([]record.RefSeries{ - {Ref: 0, Labels: labels.FromStrings("a", "b", "c", "2")}}, nil))) + {Ref: 0, Labels: labels.FromStrings("a", "b", "c", "2")}, + }, nil))) require.NoError(t, w.Close()) // Corrupt data. - f, err := os.OpenFile(filepath.Join(w.Dir(), "00000000"), os.O_WRONLY, 0666) + f, err := os.OpenFile(filepath.Join(w.Dir(), "00000000"), os.O_WRONLY, 0o666) require.NoError(t, err) _, err = f.WriteAt([]byte{42}, 1) require.NoError(t, err) diff --git a/tsdb/wal/reader_test.go b/tsdb/wal/reader_test.go index 0513b6782..07dc4dbc5 100644 --- a/tsdb/wal/reader_test.go +++ b/tsdb/wal/reader_test.go @@ -59,100 +59,102 @@ var readerConstructors = map[string]func(io.Reader) reader{ }, } -var data = make([]byte, 100000) -var testReaderCases = []struct { - t []rec - exp [][]byte - fail bool -}{ - // Sequence of valid records. - { - t: []rec{ - {recFull, data[0:200]}, - {recFirst, data[200:300]}, - {recLast, data[300:400]}, - {recFirst, data[400:800]}, - {recMiddle, data[800:900]}, - {recPageTerm, make([]byte, pageSize-900-recordHeaderSize*5-1)}, // exactly lines up with page boundary. - {recLast, data[900:900]}, - {recFirst, data[900:1000]}, - {recMiddle, data[1000:1200]}, - {recMiddle, data[1200:30000]}, - {recMiddle, data[30000:30001]}, - {recMiddle, data[30001:30001]}, - {recLast, data[30001:32000]}, +var ( + data = make([]byte, 100000) + testReaderCases = []struct { + t []rec + exp [][]byte + fail bool + }{ + // Sequence of valid records. + { + t: []rec{ + {recFull, data[0:200]}, + {recFirst, data[200:300]}, + {recLast, data[300:400]}, + {recFirst, data[400:800]}, + {recMiddle, data[800:900]}, + {recPageTerm, make([]byte, pageSize-900-recordHeaderSize*5-1)}, // exactly lines up with page boundary. + {recLast, data[900:900]}, + {recFirst, data[900:1000]}, + {recMiddle, data[1000:1200]}, + {recMiddle, data[1200:30000]}, + {recMiddle, data[30000:30001]}, + {recMiddle, data[30001:30001]}, + {recLast, data[30001:32000]}, + }, + exp: [][]byte{ + data[0:200], + data[200:400], + data[400:900], + data[900:32000], + }, }, - exp: [][]byte{ - data[0:200], - data[200:400], - data[400:900], - data[900:32000], + // Exactly at the limit of one page minus the header size + { + t: []rec{ + {recFull, data[0 : pageSize-recordHeaderSize]}, + }, + exp: [][]byte{ + data[:pageSize-recordHeaderSize], + }, }, - }, - // Exactly at the limit of one page minus the header size - { - t: []rec{ - {recFull, data[0 : pageSize-recordHeaderSize]}, + // More than a full page, this exceeds our buffer and can never happen + // when written by the WAL. 
+ { + t: []rec{ + {recFull, data[0 : pageSize+1]}, + }, + fail: true, }, - exp: [][]byte{ - data[:pageSize-recordHeaderSize], + // Two records the together are too big for a page. + // NB currently the non-live reader succeeds on this. I think this is a bug. + // but we've seen it in production. + { + t: []rec{ + {recFull, data[:pageSize/2]}, + {recFull, data[:pageSize/2]}, + }, + exp: [][]byte{ + data[:pageSize/2], + data[:pageSize/2], + }, }, - }, - // More than a full page, this exceeds our buffer and can never happen - // when written by the WAL. - { - t: []rec{ - {recFull, data[0 : pageSize+1]}, + // Invalid orders of record types. + { + t: []rec{{recMiddle, data[:200]}}, + fail: true, }, - fail: true, - }, - // Two records the together are too big for a page. - // NB currently the non-live reader succeeds on this. I think this is a bug. - // but we've seen it in production. - { - t: []rec{ - {recFull, data[:pageSize/2]}, - {recFull, data[:pageSize/2]}, + { + t: []rec{{recLast, data[:200]}}, + fail: true, }, - exp: [][]byte{ - data[:pageSize/2], - data[:pageSize/2], + { + t: []rec{ + {recFirst, data[:200]}, + {recFull, data[200:400]}, + }, + fail: true, }, - }, - // Invalid orders of record types. - { - t: []rec{{recMiddle, data[:200]}}, - fail: true, - }, - { - t: []rec{{recLast, data[:200]}}, - fail: true, - }, - { - t: []rec{ - {recFirst, data[:200]}, - {recFull, data[200:400]}, + { + t: []rec{ + {recFirst, data[:100]}, + {recMiddle, data[100:200]}, + {recFull, data[200:400]}, + }, + fail: true, }, - fail: true, - }, - { - t: []rec{ - {recFirst, data[:100]}, - {recMiddle, data[100:200]}, - {recFull, data[200:400]}, + // Non-zero data after page termination. + { + t: []rec{ + {recFull, data[:100]}, + {recPageTerm, append(make([]byte, pageSize-recordHeaderSize-102), 1)}, + }, + exp: [][]byte{data[:100]}, + fail: true, }, - fail: true, - }, - // Non-zero data after page termination. - { - t: []rec{ - {recFull, data[:100]}, - {recPageTerm, append(make([]byte, pageSize-recordHeaderSize-102), 1)}, - }, - exp: [][]byte{data[:100]}, - fail: true, - }, -} + } +) func encodedRecord(t recType, b []byte) []byte { if t == recPageTerm { @@ -279,6 +281,7 @@ type multiReadCloser struct { func (m *multiReadCloser) Read(p []byte) (n int, err error) { return m.reader.Read(p) } + func (m *multiReadCloser) Close() error { return tsdb_errors.NewMulti(tsdb_errors.CloseAll(m.closers)).Err() } @@ -439,7 +442,7 @@ func TestLiveReaderCorrupt_ShortFile(t *testing.T) { err = w.Close() require.NoError(t, err) - segmentFile, err := os.OpenFile(filepath.Join(dir, "00000000"), os.O_RDWR, 0666) + segmentFile, err := os.OpenFile(filepath.Join(dir, "00000000"), os.O_RDWR, 0o666) require.NoError(t, err) err = segmentFile.Truncate(pageSize / 2) @@ -479,7 +482,7 @@ func TestLiveReaderCorrupt_RecordTooLongAndShort(t *testing.T) { err = w.Close() require.NoError(t, err) - segmentFile, err := os.OpenFile(filepath.Join(dir, "00000000"), os.O_RDWR, 0666) + segmentFile, err := os.OpenFile(filepath.Join(dir, "00000000"), os.O_RDWR, 0o666) require.NoError(t, err) // Override the record length diff --git a/tsdb/wal/wal.go b/tsdb/wal/wal.go index 7254fc4c7..872e83a4d 100644 --- a/tsdb/wal/wal.go +++ b/tsdb/wal/wal.go @@ -118,7 +118,7 @@ func (e *CorruptionErr) Error() string { // OpenWriteSegment opens segment k in dir. The returned segment is ready for new appends. 
func OpenWriteSegment(logger log.Logger, dir string, k int) (*Segment, error) { segName := SegmentName(dir, k) - f, err := os.OpenFile(segName, os.O_WRONLY|os.O_APPEND, 0666) + f, err := os.OpenFile(segName, os.O_WRONLY|os.O_APPEND, 0o666) if err != nil { return nil, err } @@ -144,7 +144,7 @@ func OpenWriteSegment(logger log.Logger, dir string, k int) (*Segment, error) { // CreateSegment creates a new segment k in dir. func CreateSegment(dir string, k int) (*Segment, error) { - f, err := os.OpenFile(SegmentName(dir, k), os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0666) + f, err := os.OpenFile(SegmentName(dir, k), os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0o666) if err != nil { return nil, err } @@ -260,7 +260,7 @@ func NewSize(logger log.Logger, reg prometheus.Registerer, dir string, segmentSi if segmentSize%pageSize != 0 { return nil, errors.New("invalid segment size") } - if err := os.MkdirAll(dir, 0777); err != nil { + if err := os.MkdirAll(dir, 0o777); err != nil { return nil, errors.Wrap(err, "create dir") } if logger == nil { diff --git a/tsdb/wal/wal_test.go b/tsdb/wal/wal_test.go index d73a94fbb..55cd6caa1 100644 --- a/tsdb/wal/wal_test.go +++ b/tsdb/wal/wal_test.go @@ -141,7 +141,7 @@ func TestWALRepair_ReadingError(t *testing.T) { require.NoError(t, w.Close()) - f, err := os.OpenFile(SegmentName(dir, test.corrSgm), os.O_RDWR, 0666) + f, err := os.OpenFile(SegmentName(dir, test.corrSgm), os.O_RDWR, 0o666) require.NoError(t, err) // Apply corruption function. @@ -167,7 +167,7 @@ func TestWALRepair_ReadingError(t *testing.T) { for r.Next() { } - //Close the segment so we don't break things on Windows. + // Close the segment so we don't break things on Windows. s.Close() // No corruption in this segment. @@ -244,7 +244,7 @@ func TestCorruptAndCarryOn(t *testing.T) { segments, err := listSegments(dir) require.NoError(t, err) for _, segment := range segments { - f, err := os.OpenFile(filepath.Join(dir, fmt.Sprintf("%08d", segment.index)), os.O_RDONLY, 0666) + f, err := os.OpenFile(filepath.Join(dir, fmt.Sprintf("%08d", segment.index)), os.O_RDONLY, 0o666) require.NoError(t, err) fi, err := f.Stat() @@ -261,7 +261,7 @@ func TestCorruptAndCarryOn(t *testing.T) { // Truncate the first file, splitting the middle record in the second // page in half, leaving 4 valid records. { - f, err := os.OpenFile(filepath.Join(dir, fmt.Sprintf("%08d", 0)), os.O_RDWR, 0666) + f, err := os.OpenFile(filepath.Join(dir, fmt.Sprintf("%08d", 0)), os.O_RDWR, 0o666) require.NoError(t, err) fi, err := f.Stat() diff --git a/tsdb/wal/watcher_test.go b/tsdb/wal/watcher_test.go index a1786f823..58800431b 100644 --- a/tsdb/wal/watcher_test.go +++ b/tsdb/wal/watcher_test.go @@ -29,9 +29,11 @@ import ( "github.com/prometheus/prometheus/tsdb/record" ) -var defaultRetryInterval = 100 * time.Millisecond -var defaultRetries = 100 -var wMetrics = NewWatcherMetrics(prometheus.DefaultRegisterer) +var ( + defaultRetryInterval = 100 * time.Millisecond + defaultRetries = 100 + wMetrics = NewWatcherMetrics(prometheus.DefaultRegisterer) +) // retry executes f() n times at each interval until it returns true. 
func retry(t *testing.T, interval time.Duration, n int, f func() bool) { @@ -112,7 +114,7 @@ func TestTailSamples(t *testing.T) { dir := t.TempDir() wdir := path.Join(dir, "wal") - err := os.Mkdir(wdir, 0777) + err := os.Mkdir(wdir, 0o777) require.NoError(t, err) enc := record.Encoder{} @@ -201,7 +203,7 @@ func TestReadToEndNoCheckpoint(t *testing.T) { t.Run(fmt.Sprintf("compress=%t", compress), func(t *testing.T) { dir := t.TempDir() wdir := path.Join(dir, "wal") - err := os.Mkdir(wdir, 0777) + err := os.Mkdir(wdir, 0o777) require.NoError(t, err) w, err := NewSize(nil, nil, wdir, 128*pageSize, compress) @@ -271,7 +273,7 @@ func TestReadToEndWithCheckpoint(t *testing.T) { dir := t.TempDir() wdir := path.Join(dir, "wal") - err := os.Mkdir(wdir, 0777) + err := os.Mkdir(wdir, 0o777) require.NoError(t, err) enc := record.Encoder{} @@ -358,7 +360,7 @@ func TestReadCheckpoint(t *testing.T) { dir := t.TempDir() wdir := path.Join(dir, "wal") - err := os.Mkdir(wdir, 0777) + err := os.Mkdir(wdir, 0o777) require.NoError(t, err) os.Create(SegmentName(wdir, 30)) @@ -426,7 +428,7 @@ func TestReadCheckpointMultipleSegments(t *testing.T) { dir := t.TempDir() wdir := path.Join(dir, "wal") - err := os.Mkdir(wdir, 0777) + err := os.Mkdir(wdir, 0o777) require.NoError(t, err) enc := record.Encoder{} @@ -462,7 +464,7 @@ func TestReadCheckpointMultipleSegments(t *testing.T) { // At this point we should have at least 6 segments, lets create a checkpoint dir of the first 5. checkpointDir := dir + "/wal/checkpoint.000004" - err = os.Mkdir(checkpointDir, 0777) + err = os.Mkdir(checkpointDir, 0o777) require.NoError(t, err) for i := 0; i <= 4; i++ { err := os.Rename(SegmentName(dir+"/wal", i), SegmentName(checkpointDir, i)) @@ -504,7 +506,7 @@ func TestCheckpointSeriesReset(t *testing.T) { dir := t.TempDir() wdir := path.Join(dir, "wal") - err := os.Mkdir(wdir, 0777) + err := os.Mkdir(wdir, 0o777) require.NoError(t, err) enc := record.Encoder{} diff --git a/tsdb/wal_test.go b/tsdb/wal_test.go index fdcd6ce52..ba8694b7a 100644 --- a/tsdb/wal_test.go +++ b/tsdb/wal_test.go @@ -326,7 +326,7 @@ func TestWALRestoreCorrupted(t *testing.T) { { name: "truncate_checksum", f: func(t *testing.T, w *SegmentWAL) { - f, err := os.OpenFile(w.files[0].Name(), os.O_WRONLY, 0666) + f, err := os.OpenFile(w.files[0].Name(), os.O_WRONLY, 0o666) require.NoError(t, err) defer f.Close() @@ -339,7 +339,7 @@ func TestWALRestoreCorrupted(t *testing.T) { { name: "truncate_body", f: func(t *testing.T, w *SegmentWAL) { - f, err := os.OpenFile(w.files[0].Name(), os.O_WRONLY, 0666) + f, err := os.OpenFile(w.files[0].Name(), os.O_WRONLY, 0o666) require.NoError(t, err) defer f.Close() @@ -352,7 +352,7 @@ func TestWALRestoreCorrupted(t *testing.T) { { name: "body_content", f: func(t *testing.T, w *SegmentWAL) { - f, err := os.OpenFile(w.files[0].Name(), os.O_WRONLY, 0666) + f, err := os.OpenFile(w.files[0].Name(), os.O_WRONLY, 0o666) require.NoError(t, err) defer f.Close() @@ -367,7 +367,7 @@ func TestWALRestoreCorrupted(t *testing.T) { { name: "checksum", f: func(t *testing.T, w *SegmentWAL) { - f, err := os.OpenFile(w.files[0].Name(), os.O_WRONLY, 0666) + f, err := os.OpenFile(w.files[0].Name(), os.O_WRONLY, 0o666) require.NoError(t, err) defer f.Close() diff --git a/util/osutil/hostname.go b/util/osutil/hostname.go index 224dffe7c..c44cb391b 100644 --- a/util/osutil/hostname.go +++ b/util/osutil/hostname.go @@ -49,14 +49,12 @@ func GetFQDN() (string, error) { if fqdn, err := lookup(ip); err == nil { return fqdn, nil } - } if ip := addr.To16(); ip != 
nil { if fqdn, err := lookup(ip); err == nil { return fqdn, nil } - } } return hostname, nil diff --git a/util/strutil/strconv.go b/util/strutil/strconv.go index 3d96e4faf..eed0134ab 100644 --- a/util/strutil/strconv.go +++ b/util/strutil/strconv.go @@ -19,9 +19,7 @@ import ( "regexp" ) -var ( - invalidLabelCharRE = regexp.MustCompile(`[^a-zA-Z0-9_]`) -) +var invalidLabelCharRE = regexp.MustCompile(`[^a-zA-Z0-9_]`) // TableLinkForExpression creates an escaped relative link to the table view of // the provided expression. diff --git a/util/testutil/roundtrip.go b/util/testutil/roundtrip.go index 996d11f36..a93991a13 100644 --- a/util/testutil/roundtrip.go +++ b/util/testutil/roundtrip.go @@ -43,5 +43,7 @@ func NewRoundTripCheckRequest(checkRequest func(*http.Request), theResponse *htt checkRequest: checkRequest, roundTrip: roundTrip{ theResponse: theResponse, - theError: theError}} + theError: theError, + }, + } } diff --git a/web/api/v1/api.go b/web/api/v1/api.go index 4d85a5688..dc9c01d9a 100644 --- a/web/api/v1/api.go +++ b/web/api/v1/api.go @@ -75,9 +75,7 @@ const ( errorNotFound errorType = "not_found" ) -var ( - LocalhostRepresentations = []string{"127.0.0.1", "localhost", "::1"} -) +var LocalhostRepresentations = []string{"127.0.0.1", "localhost", "::1"} type apiError struct { typ errorType @@ -1453,7 +1451,7 @@ func (api *API) snapshot(r *http.Request) apiFuncResult { rand.Int63()) dir = filepath.Join(snapdir, name) ) - if err := os.MkdirAll(dir, 0777); err != nil { + if err := os.MkdirAll(dir, 0o777); err != nil { return apiFuncResult{nil, &apiError{errorInternal, errors.Wrap(err, "create snapshot directory")}, nil, nil} } if err := api.db.Snapshot(dir, !skipHead); err != nil { @@ -1509,7 +1507,6 @@ func (api *API) respondError(w http.ResponseWriter, apiErr *apiError, data inter Error: apiErr.err.Error(), Data: data, }) - if err != nil { level.Error(api.logger).Log("msg", "error marshaling json response", "err", err) http.Error(w, err.Error(), http.StatusInternalServerError) @@ -1652,7 +1649,7 @@ func marshalExemplarJSON(ptr unsafe.Pointer, stream *jsoniter.Stream) { stream.WriteMore() stream.WriteObjectField(`timestamp`) marshalTimestamp(p.Ts, stream) - //marshalTimestamp(p.Ts, stream) + // marshalTimestamp(p.Ts, stream) stream.WriteObjectEnd() } diff --git a/web/api/v1/api_test.go b/web/api/v1/api_test.go index b839e61cd..a04a3d8db 100644 --- a/web/api/v1/api_test.go +++ b/web/api/v1/api_test.go @@ -126,9 +126,7 @@ func newTestTargetRetriever(targetsInfo []*testTargetParams) *testTargetRetrieve } } -var ( - scrapeStart = time.Now().Add(-11 * time.Second) -) +var scrapeStart = time.Now().Add(-11 * time.Second) func (t testTargetRetriever) TargetsActive() map[string][]*scrape.Target { return t.activeTargets @@ -452,7 +450,6 @@ func TestEndpoints(t *testing.T) { testEndpoints(t, api, testTargetRetriever, suite.ExemplarStorage(), false) }) - } func TestLabelNames(t *testing.T) { @@ -651,7 +648,7 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E exemplars []exemplar.QueryResult } - var tests = []test{ + tests := []test{ { endpoint: api.query, query: url.Values{ @@ -2137,7 +2134,7 @@ func assertAPIError(t *testing.T, got *apiError, exp errorType) { } } -func assertAPIResponse(t *testing.T, got interface{}, exp interface{}) { +func assertAPIResponse(t *testing.T, got, exp interface{}) { t.Helper() require.Equal(t, exp, got) @@ -2179,6 +2176,7 @@ func (f *fakeDB) Stats(statsByLabelName string) (_ *tsdb.Stats, retErr error) { h, _ := tsdb.NewHead(nil, 
nil, nil, opts, nil) return h.Stats(statsByLabelName), nil } + func (f *fakeDB) WALReplayStatus() (tsdb.WALReplayStatus, error) { return tsdb.WALReplayStatus{}, nil } @@ -2449,7 +2447,7 @@ func TestParseTimeParam(t *testing.T) { ts, err := parseTime("1582468023986") require.NoError(t, err) - var tests = []struct { + tests := []struct { paramName string paramValue string defaultValue time.Time @@ -2508,7 +2506,7 @@ func TestParseTime(t *testing.T) { panic(err) } - var tests = []struct { + tests := []struct { input string fail bool result time.Time @@ -2516,25 +2514,32 @@ func TestParseTime(t *testing.T) { { input: "", fail: true, - }, { + }, + { input: "abc", fail: true, - }, { + }, + { input: "30s", fail: true, - }, { + }, + { input: "123", result: time.Unix(123, 0), - }, { + }, + { input: "123.123", result: time.Unix(123, 123000000), - }, { + }, + { input: "2015-06-03T13:21:58.555Z", result: ts, - }, { + }, + { input: "2015-06-03T14:21:58.555+01:00", result: ts, - }, { + }, + { // Test float rounding. input: "1543578564.705", result: time.Unix(1543578564, 705*1e6), @@ -2566,7 +2571,7 @@ func TestParseTime(t *testing.T) { } func TestParseDuration(t *testing.T) { - var tests = []struct { + tests := []struct { input string fail bool result time.Duration diff --git a/web/web.go b/web/web.go index 269abd0bf..f4e5fd65c 100644 --- a/web/web.go +++ b/web/web.go @@ -607,7 +607,6 @@ func (h *Handler) Run(ctx context.Context, listener net.Listener, webConfig stri } func (h *Handler) alerts(w http.ResponseWriter, r *http.Request) { - var groups []*rules.Group for _, group := range h.ruleManager.RuleGroups() { if group.HasAlertingRules() {
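The TestParseTime hunk above illustrates the last recurring rewrite in this patch: in a multi-line composite literal, an element may no longer begin on the same line as the closing brace of the previous one, so runs of `}, {` are split apart. A standalone sketch of the resulting shape, with illustrative values:

package main

import "fmt"

func main() {
	tests := []struct {
		input string
		fail  bool
	}{
		{
			input: "",
			fail:  true,
		},
		// Previously this element began on the same line as the closing
		// brace above it, written as "}, {".
		{
			input: "123",
		},
	}
	fmt.Println(tests)
}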