From 8858a276c2f370638139ea09ace63aef20e773cb Mon Sep 17 00:00:00 2001
From: Olivier Lemasle
Date: Tue, 19 Oct 2021 18:08:00 +0200
Subject: [PATCH 001/161] Fix link to React App's README (#9532)

Signed-off-by: Olivier Lemasle
---
 README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/README.md b/README.md
index 4a1aadce56..15c185418f 100644
--- a/README.md
+++ b/README.md
@@ -107,7 +107,7 @@ You can build a docker image locally with the following commands:
 
 ## React UI Development
 
-For more information on building, running, and developing on the new React-based UI, see the React app's [README.md](web/ui/react-app/README.md).
+For more information on building, running, and developing on the new React-based UI, see the React app's [README.md](web/ui/README.md).
 
 ## More information
 

From 2a574bf8c29c98f9ba531c31e1066535dead0e16 Mon Sep 17 00:00:00 2001
From: Julien Pivotto
Date: Tue, 19 Oct 2021 21:26:39 +0200
Subject: [PATCH 002/161] Uyuni: enable HTTP2 again (#9539)

PR #9398 re-introduced HTTP2 in Prometheus. Uyuni SD can use the client
without disabling it.

Signed-off-by: Julien Pivotto
---
 discovery/uyuni/uyuni.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/discovery/uyuni/uyuni.go b/discovery/uyuni/uyuni.go
index 080f17a8c2..0a9c1fbf2c 100644
--- a/discovery/uyuni/uyuni.go
+++ b/discovery/uyuni/uyuni.go
@@ -210,7 +210,7 @@ func NewDiscovery(conf *SDConfig, logger log.Logger) (*Discovery, error) {
 	*apiURL = *conf.Server.URL
 	apiURL.Path = path.Join(apiURL.Path, uyuniXMLRPCAPIPath)
 
-	rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "uyuni_sd", config.WithHTTP2Disabled())
+	rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "uyuni_sd")
 	if err != nil {
 		return nil, err
 	}

From df1bae051482edd8815b4367ff87e23fab719771 Mon Sep 17 00:00:00 2001
From: Julien Pivotto
Date: Tue, 19 Oct 2021 21:26:52 +0200
Subject: [PATCH 003/161] Add support for security-related HTTP headers
 (#9546)

Signed-off-by: Julien Pivotto
---
 docs/configuration/https.md | 24 ++++++++++++++++++++++++
 go.mod                      |  2 +-
 go.sum                      |  3 ++-
 3 files changed, 27 insertions(+), 2 deletions(-)

diff --git a/docs/configuration/https.md b/docs/configuration/https.md
index 1799739d0a..c060ec4288 100644
--- a/docs/configuration/https.md
+++ b/docs/configuration/https.md
@@ -73,6 +73,30 @@ http_server_config:
   # Enable HTTP/2 support. Note that HTTP/2 is only supported with TLS.
   # This can not be changed on the fly.
   [ http2: <boolean> | default = true ]
+  # List of headers that can be added to HTTP responses.
+  [ headers:
+    # Set the Content-Security-Policy header to HTTP responses.
+    # Unset if blank.
+    [ Content-Security-Policy: <string> ]
+    # Set the X-Frame-Options header to HTTP responses.
+    # Unset if blank. Accepted values are deny and sameorigin.
+    # https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Frame-Options
+    [ X-Frame-Options: <string> ]
+    # Set the X-Content-Type-Options header to HTTP responses.
+    # Unset if blank. Accepted value is nosniff.
+    # https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Content-Type-Options
+    [ X-Content-Type-Options: <string> ]
+    # Set the X-XSS-Protection header to all responses.
+    # Unset if blank.
+    # https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-XSS-Protection
+    [ X-XSS-Protection: <string> ]
+    # Set the Strict-Transport-Security header to HTTP responses.
+    # Unset if blank.
+    # Please make sure that you use this with care as this header might force
+    # browsers to load Prometheus and the other applications hosted on the same
+    # domain and subdomains over HTTPS.
+    # https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Strict-Transport-Security
+    [ Strict-Transport-Security: <string> ] ]
 
 # Usernames and hashed passwords that have full access to the web
 # server via basic authentication. If empty, no basic authentication is
diff --git a/go.mod b/go.mod
index 3fb313c399..82de9c1fe1 100644
--- a/go.mod
+++ b/go.mod
@@ -49,7 +49,7 @@ require (
 	github.com/prometheus/client_model v0.2.0
 	github.com/prometheus/common v0.31.1
 	github.com/prometheus/common/sigv4 v0.1.0
-	github.com/prometheus/exporter-toolkit v0.6.1
+	github.com/prometheus/exporter-toolkit v0.7.0
 	github.com/scaleway/scaleway-sdk-go v1.0.0-beta.7.0.20210223165440-c65ae3540d44
 	github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749
 	github.com/shurcooL/vfsgen v0.0.0-20200824052919-0d455de96546
diff --git a/go.sum b/go.sum
index 742048ffc3..4fda1999c0 100644
--- a/go.sum
+++ b/go.sum
@@ -1147,8 +1147,9 @@ github.com/prometheus/common v0.31.1 h1:d18hG4PkHnNAKNMOmFuXFaiY8Us0nird/2m60uS1
 github.com/prometheus/common v0.31.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
 github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4=
 github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI=
-github.com/prometheus/exporter-toolkit v0.6.1 h1:Aqk75wQD92N9CqmTlZwjKwq6272nOGrWIbc8Z7+xQO0=
 github.com/prometheus/exporter-toolkit v0.6.1/go.mod h1:ZUBIj498ePooX9t/2xtDjeQYwvRpiPP2lh5u4iblj2g=
+github.com/prometheus/exporter-toolkit v0.7.0 h1:XtYeVeeC5daG4txbc9+mieKq+/AK4gtIBLl9Mulrjnk=
+github.com/prometheus/exporter-toolkit v0.7.0/go.mod h1:ZUBIj498ePooX9t/2xtDjeQYwvRpiPP2lh5u4iblj2g=
 github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
 github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
 github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=

From 68bc92b520983c404d24c726ebee2cfd83b26ec6 Mon Sep 17 00:00:00 2001
From: Julien Pivotto
Date: Tue, 19 Oct 2021 23:38:44 +0200
Subject: [PATCH 004/161] uyuni: small fixes for comments (#9538)

* uyuni: small fixes for comments

Signed-off-by: Julien Pivotto
---
 discovery/uyuni/uyuni.go | 6 ------
 1 file changed, 6 deletions(-)

diff --git a/discovery/uyuni/uyuni.go b/discovery/uyuni/uyuni.go
index 0a9c1fbf2c..a6b2b6fce0 100644
--- a/discovery/uyuni/uyuni.go
+++ b/discovery/uyuni/uyuni.go
@@ -71,7 +71,6 @@ type SDConfig struct {
 	RefreshInterval model.Duration `yaml:"refresh_interval,omitempty"`
 }
 
-// Uyuni API Response structures
 type systemGroupID struct {
 	GroupID   int    `xmlrpc:"id"`
 	GroupName string `xmlrpc:"name"`
@@ -142,19 +141,16 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
 	return nil
 }
 
-// Attempt to login in Uyuni Server and get an auth token
 func login(rpcclient *xmlrpc.Client, user string, pass string) (string, error) {
 	var result string
 	err := rpcclient.Call("auth.login", []interface{}{user, pass}, &result)
 	return result, err
 }
 
-// Logout from Uyuni API
 func logout(rpcclient *xmlrpc.Client, token string) error {
 	return rpcclient.Call("auth.logout", token, nil)
 }
 
-// Get the system groups information of monitored clients
 func getSystemGroupsInfoOfMonitoredClients(rpcclient *xmlrpc.Client, token string, entitlement string) (map[int][]systemGroupID, error) {
 	var systemGroupsInfos []struct {
 		SystemID     int             `xmlrpc:"id"`
 		SystemGroups []systemGroupID `xmlrpc:"system_groups"`
 	}
@@ -173,7 +169,6 @@ func getSystemGroupsInfoOfMonitoredClients(rpcclient *xmlrpc.Client, token strin
 	return result, nil
 }
 
-// GetSystemNetworkInfo lists client FQDNs.
 func getNetworkInformationForSystems(rpcclient *xmlrpc.Client, token string, systemIDs []int) (map[int]networkInfo, error) {
 	var networkInfos []networkInfo
 	err := rpcclient.Call("system.getNetworkForSystems", []interface{}{token, systemIDs}, &networkInfos)
@@ -188,7 +183,6 @@ func getNetworkInformationForSystems(rpcclient *xmlrpc.Client, token string, sys
 	return result, nil
 }
 
-// Get endpoints information for given systems
 func getEndpointInfoForSystems(
 	rpcclient *xmlrpc.Client,
 	token string,

From 432005826d06e6d6ac8f2d3836f7fdf66d9c3092 Mon Sep 17 00:00:00 2001
From: Julien Pivotto
Date: Wed, 20 Oct 2021 10:15:54 +0200
Subject: [PATCH 005/161] Add a feature flag to enable the new discovery
 manager (#9537)

* Add a feature flag to enable the new manager

This PR creates a copy of the legacy manager and uses it by default.

It is a companion PR to #9349. With this PR, users can enable the new
discovery manager and provide us with any feedback / side effects that
the new behaviour might have.

Signed-off-by: Julien Pivotto
---
 cmd/prometheus/main.go                  |   34 +-
 discovery/legacymanager/manager.go      |  357 +++++
 discovery/legacymanager/manager_test.go | 1143 +++++++++++++++++++++++
 discovery/legacymanager/registry.go     |  259 +++++
 discovery/manager.go                    |    2 +-
 docs/feature_flags.md                   |   24 +-
 6 files changed, 1809 insertions(+), 10 deletions(-)
 create mode 100644 discovery/legacymanager/manager.go
 create mode 100644 discovery/legacymanager/manager_test.go
 create mode 100644 discovery/legacymanager/registry.go

diff --git a/cmd/prometheus/main.go b/cmd/prometheus/main.go
index 6eb850f383..d50cd3e7f3 100644
--- a/cmd/prometheus/main.go
+++ b/cmd/prometheus/main.go
@@ -58,6 +58,8 @@ import (
 	"github.com/prometheus/prometheus/config"
 	"github.com/prometheus/prometheus/discovery"
 	_ "github.com/prometheus/prometheus/discovery/install" // Register service discovery implementations.
+	"github.com/prometheus/prometheus/discovery/legacymanager"
+	"github.com/prometheus/prometheus/discovery/targetgroup"
 	"github.com/prometheus/prometheus/notifier"
 	"github.com/prometheus/prometheus/pkg/exemplar"
 	"github.com/prometheus/prometheus/pkg/labels"
@@ -122,6 +124,7 @@ type flagConfig struct {
 	enablePromQLAtModifier     bool
 	enablePromQLNegativeOffset bool
 	enableExpandExternalLabels bool
+	enableNewSDManager         bool
 
 	prometheusURL   string
 	corsRegexString string
@@ -156,6 +159,9 @@ func (c *flagConfig) setFeatureListOptions(logger log.Logger) error {
 		case "extra-scrape-metrics":
 			c.scrape.ExtraMetrics = true
 			level.Info(logger).Log("msg", "Experimental additional scrape metrics")
+		case "new-service-discovery-manager":
+			c.enableNewSDManager = true
+			level.Info(logger).Log("msg", "Experimental service discovery manager")
 		case "":
 			continue
 		default:
@@ -319,7 +325,7 @@ func main() {
 	a.Flag("query.max-samples", "Maximum number of samples a single query can load into memory. Note that queries will fail if they try to load more samples than this into memory, so this also limits the number of samples a query can return.").
 		Default("50000000").IntVar(&cfg.queryMaxSamples)
 
-	a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-at-modifier, promql-negative-offset, remote-write-receiver, extra-scrape-metrics. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details.").
+	a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-at-modifier, promql-negative-offset, remote-write-receiver, extra-scrape-metrics, new-service-discovery-manager. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details.").
 		Default("").StringsVar(&cfg.featureList)
 
 	promlogflag.AddFlags(a, &cfg.promlogConfig)
@@ -459,11 +465,22 @@ func main() {
 		notifierManager = notifier.NewManager(&cfg.notifier, log.With(logger, "component", "notifier"))
 
 		ctxScrape, cancelScrape = context.WithCancel(context.Background())
-		discoveryManagerScrape  = discovery.NewManager(ctxScrape, log.With(logger, "component", "discovery manager scrape"), discovery.Name("scrape"))
-
 		ctxNotify, cancelNotify = context.WithCancel(context.Background())
-		discoveryManagerNotify  = discovery.NewManager(ctxNotify, log.With(logger, "component", "discovery manager notify"), discovery.Name("notify"))
+
+		discoveryManagerScrape discoveryManager
+		discoveryManagerNotify discoveryManager
+	)
+
+	if cfg.enableNewSDManager {
+		discovery.RegisterMetrics()
+		discoveryManagerScrape = discovery.NewManager(ctxScrape, log.With(logger, "component", "discovery manager scrape"), discovery.Name("scrape"))
+		discoveryManagerNotify = discovery.NewManager(ctxNotify, log.With(logger, "component", "discovery manager notify"), discovery.Name("notify"))
+	} else {
+		legacymanager.RegisterMetrics()
+		discoveryManagerScrape = legacymanager.NewManager(ctxScrape, log.With(logger, "component", "discovery manager scrape"), legacymanager.Name("scrape"))
+		discoveryManagerNotify = legacymanager.NewManager(ctxNotify, log.With(logger, "component", "discovery manager notify"), legacymanager.Name("notify"))
+	}
+
+	var (
 		scrapeManager = scrape.NewManager(&cfg.scrape, log.With(logger, "component", "scrape manager"), fanoutStorage)
 
 		opts = promql.EngineOpts{
@@ -1346,3 +1363,12 @@ func (l jaegerLogger) Infof(msg string, args ...interface{}) {
 	keyvals := []interface{}{"msg", fmt.Sprintf(msg, args...)}
 	level.Info(l.logger).Log(keyvals...)
 }
+
+// discoveryManager interfaces the discovery manager. This is used to keep using
+// the manager that restarts SD's on reload for a few releases until we feel
+// the new manager can be enabled for all users.
+type discoveryManager interface {
+	ApplyConfig(cfg map[string]discovery.Configs) error
+	Run() error
+	SyncCh() <-chan map[string][]*targetgroup.Group
+}
diff --git a/discovery/legacymanager/manager.go b/discovery/legacymanager/manager.go
new file mode 100644
index 0000000000..7a3d6b3b82
--- /dev/null
+++ b/discovery/legacymanager/manager.go
@@ -0,0 +1,357 @@
+// Copyright 2016 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package legacymanager
+
+import (
+	"context"
+	"fmt"
+	"reflect"
+	"sync"
+	"time"
+
+	"github.com/go-kit/log"
+	"github.com/go-kit/log/level"
+	"github.com/prometheus/client_golang/prometheus"
+
+	"github.com/prometheus/prometheus/discovery"
+	"github.com/prometheus/prometheus/discovery/targetgroup"
+)
+
+var (
+	failedConfigs = prometheus.NewGaugeVec(
+		prometheus.GaugeOpts{
+			Name: "prometheus_sd_failed_configs",
+			Help: "Current number of service discovery configurations that failed to load.",
+		},
+		[]string{"name"},
+	)
+	discoveredTargets = prometheus.NewGaugeVec(
+		prometheus.GaugeOpts{
+			Name: "prometheus_sd_discovered_targets",
+			Help: "Current number of discovered targets.",
+		},
+		[]string{"name", "config"},
+	)
+	receivedUpdates = prometheus.NewCounterVec(
+		prometheus.CounterOpts{
+			Name: "prometheus_sd_received_updates_total",
+			Help: "Total number of update events received from the SD providers.",
+		},
+		[]string{"name"},
+	)
+	delayedUpdates = prometheus.NewCounterVec(
+		prometheus.CounterOpts{
+			Name: "prometheus_sd_updates_delayed_total",
+			Help: "Total number of update events that couldn't be sent immediately.",
+		},
+		[]string{"name"},
+	)
+	sentUpdates = prometheus.NewCounterVec(
+		prometheus.CounterOpts{
+			Name: "prometheus_sd_updates_total",
+			Help: "Total number of update events sent to the SD consumers.",
+		},
+		[]string{"name"},
+	)
+)
+
+func RegisterMetrics() {
+	prometheus.MustRegister(failedConfigs, discoveredTargets, receivedUpdates, delayedUpdates, sentUpdates)
+}
+
+type poolKey struct {
+	setName  string
+	provider string
+}
+
+// provider holds a Discoverer instance, its configuration and its subscribers.
+type provider struct {
+	name   string
+	d      discovery.Discoverer
+	subs   []string
+	config interface{}
+}
+
+// NewManager is the Discovery Manager constructor.
+func NewManager(ctx context.Context, logger log.Logger, options ...func(*Manager)) *Manager {
+	if logger == nil {
+		logger = log.NewNopLogger()
+	}
+	mgr := &Manager{
+		logger:         logger,
+		syncCh:         make(chan map[string][]*targetgroup.Group),
+		targets:        make(map[poolKey]map[string]*targetgroup.Group),
+		discoverCancel: []context.CancelFunc{},
+		ctx:            ctx,
+		updatert:       5 * time.Second,
+		triggerSend:    make(chan struct{}, 1),
+	}
+	for _, option := range options {
+		option(mgr)
+	}
+	return mgr
+}
+
+// Name sets the name of the manager.
+func Name(n string) func(*Manager) {
+	return func(m *Manager) {
+		m.mtx.Lock()
+		defer m.mtx.Unlock()
+		m.name = n
+	}
+}
+
+// Manager maintains a set of discovery providers and sends each update to a map channel.
+// Targets are grouped by the target set name.
+type Manager struct {
+	logger         log.Logger
+	name           string
+	mtx            sync.RWMutex
+	ctx            context.Context
+	discoverCancel []context.CancelFunc
+
+	// Some Discoverers(eg. k8s) send only the updates for a given target group
+	// so we use map[tg.Source]*targetgroup.Group to know which group to update.
+	targets map[poolKey]map[string]*targetgroup.Group
+	// providers keeps track of SD providers.
+	providers []*provider
+	// The sync channel sends the updates as a map where the key is the job value from the scrape config.
+	syncCh chan map[string][]*targetgroup.Group
+
+	// How long to wait before sending updates to the channel. The variable
+	// should only be modified in unit tests.
+	updatert time.Duration
+
+	// The triggerSend channel signals to the manager that new updates have been received from providers.
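+	// The channel is buffered (size one) and written with non-blocking sends, so
+	// bursts of updates coalesce into a single pending trigger instead of blocking.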
+	triggerSend chan struct{}
+}
+
+// Run starts the background processing
+func (m *Manager) Run() error {
+	go m.sender()
+	for range m.ctx.Done() {
+		m.cancelDiscoverers()
+		return m.ctx.Err()
+	}
+	return nil
+}
+
+// SyncCh returns a read only channel used by all the clients to receive target updates.
+func (m *Manager) SyncCh() <-chan map[string][]*targetgroup.Group {
+	return m.syncCh
+}
+
+// ApplyConfig removes all running discovery providers and starts new ones using the provided config.
+func (m *Manager) ApplyConfig(cfg map[string]discovery.Configs) error {
+	m.mtx.Lock()
+	defer m.mtx.Unlock()
+
+	for pk := range m.targets {
+		if _, ok := cfg[pk.setName]; !ok {
+			discoveredTargets.DeleteLabelValues(m.name, pk.setName)
+		}
+	}
+	m.cancelDiscoverers()
+	m.targets = make(map[poolKey]map[string]*targetgroup.Group)
+	m.providers = nil
+	m.discoverCancel = nil
+
+	failedCount := 0
+	for name, scfg := range cfg {
+		failedCount += m.registerProviders(scfg, name)
+		discoveredTargets.WithLabelValues(m.name, name).Set(0)
+	}
+	failedConfigs.WithLabelValues(m.name).Set(float64(failedCount))
+
+	for _, prov := range m.providers {
+		m.startProvider(m.ctx, prov)
+	}
+
+	return nil
+}
+
+// StartCustomProvider is used for sdtool. Only use this if you know what you're doing.
+func (m *Manager) StartCustomProvider(ctx context.Context, name string, worker discovery.Discoverer) {
+	p := &provider{
+		name: name,
+		d:    worker,
+		subs: []string{name},
+	}
+	m.providers = append(m.providers, p)
+	m.startProvider(ctx, p)
+}
+
+func (m *Manager) startProvider(ctx context.Context, p *provider) {
+	level.Debug(m.logger).Log("msg", "Starting provider", "provider", p.name, "subs", fmt.Sprintf("%v", p.subs))
+	ctx, cancel := context.WithCancel(ctx)
+	updates := make(chan []*targetgroup.Group)
+
+	m.discoverCancel = append(m.discoverCancel, cancel)
+
+	go p.d.Run(ctx, updates)
+	go m.updater(ctx, p, updates)
+}
+
+func (m *Manager) updater(ctx context.Context, p *provider, updates chan []*targetgroup.Group) {
+	for {
+		select {
+		case <-ctx.Done():
+			return
+		case tgs, ok := <-updates:
+			receivedUpdates.WithLabelValues(m.name).Inc()
+			if !ok {
+				level.Debug(m.logger).Log("msg", "Discoverer channel closed", "provider", p.name)
+				return
+			}
+
+			for _, s := range p.subs {
+				m.updateGroup(poolKey{setName: s, provider: p.name}, tgs)
+			}
+
+			select {
+			case m.triggerSend <- struct{}{}:
+			default:
+			}
+		}
+	}
+}
+
+func (m *Manager) sender() {
+	ticker := time.NewTicker(m.updatert)
+	defer ticker.Stop()
+
+	for {
+		select {
+		case <-m.ctx.Done():
+			return
+		case <-ticker.C: // Some discoverers send updates too often so we throttle these with the ticker.
+			select {
+			case <-m.triggerSend:
+				sentUpdates.WithLabelValues(m.name).Inc()
+				select {
+				case m.syncCh <- m.allGroups():
+				default:
+					delayedUpdates.WithLabelValues(m.name).Inc()
+					level.Debug(m.logger).Log("msg", "Discovery receiver's channel was full so will retry the next cycle")
+					select {
+					case m.triggerSend <- struct{}{}:
+					default:
+					}
+				}
+			default:
+			}
+		}
+	}
+}
+
+func (m *Manager) cancelDiscoverers() {
+	for _, c := range m.discoverCancel {
+		c()
+	}
+}
+
+func (m *Manager) updateGroup(poolKey poolKey, tgs []*targetgroup.Group) {
+	m.mtx.Lock()
+	defer m.mtx.Unlock()
+
+	if _, ok := m.targets[poolKey]; !ok {
+		m.targets[poolKey] = make(map[string]*targetgroup.Group)
+	}
+	for _, tg := range tgs {
+		if tg != nil { // Some Discoverers send nil target group so need to check for it to avoid panics.
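+			// Groups are keyed by their Source, so a newer group for the same
+			// source replaces the one stored previously.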
+			m.targets[poolKey][tg.Source] = tg
+		}
+	}
+}
+
+func (m *Manager) allGroups() map[string][]*targetgroup.Group {
+	m.mtx.RLock()
+	defer m.mtx.RUnlock()
+
+	tSets := map[string][]*targetgroup.Group{}
+	n := map[string]int{}
+	for pkey, tsets := range m.targets {
+		for _, tg := range tsets {
+			// Even if the target group 'tg' is empty we still need to send it to the 'Scrape manager'
+			// to signal that it needs to stop all scrape loops for this target set.
+			tSets[pkey.setName] = append(tSets[pkey.setName], tg)
+			n[pkey.setName] += len(tg.Targets)
+		}
+	}
+	for setName, v := range n {
+		discoveredTargets.WithLabelValues(m.name, setName).Set(float64(v))
+	}
+	return tSets
+}
+
+// registerProviders returns a number of failed SD config.
+func (m *Manager) registerProviders(cfgs discovery.Configs, setName string) int {
+	var (
+		failed int
+		added  bool
+	)
+	add := func(cfg discovery.Config) {
+		for _, p := range m.providers {
+			if reflect.DeepEqual(cfg, p.config) {
+				p.subs = append(p.subs, setName)
+				added = true
+				return
+			}
+		}
+		typ := cfg.Name()
+		d, err := cfg.NewDiscoverer(discovery.DiscovererOptions{
+			Logger: log.With(m.logger, "discovery", typ),
+		})
+		if err != nil {
+			level.Error(m.logger).Log("msg", "Cannot create service discovery", "err", err, "type", typ)
+			failed++
+			return
+		}
+		m.providers = append(m.providers, &provider{
+			name:   fmt.Sprintf("%s/%d", typ, len(m.providers)),
+			d:      d,
+			config: cfg,
+			subs:   []string{setName},
+		})
+		added = true
+	}
+	for _, cfg := range cfgs {
+		add(cfg)
+	}
+	if !added {
+		// Add an empty target group to force the refresh of the corresponding
+		// scrape pool and to notify the receiver that this target set has no
+		// current targets.
+		// It can happen because the combined set of SD configurations is empty
+		// or because we fail to instantiate all the SD configurations.
+		add(discovery.StaticConfig{{}})
+	}
+	return failed
+}
+
+// StaticProvider holds a list of target groups that never change.
+type StaticProvider struct {
+	TargetGroups []*targetgroup.Group
+}
+
+// Run implements the Worker interface.
+func (sd *StaticProvider) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
+	// We still have to consider that the consumer exits right away in which case
+	// the context will be canceled.
+	select {
+	case ch <- sd.TargetGroups:
+	case <-ctx.Done():
+	}
+	close(ch)
+}
diff --git a/discovery/legacymanager/manager_test.go b/discovery/legacymanager/manager_test.go
new file mode 100644
index 0000000000..a977fcd292
--- /dev/null
+++ b/discovery/legacymanager/manager_test.go
@@ -0,0 +1,1143 @@
+// Copyright 2016 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package legacymanager
+
+import (
+	"context"
+	"fmt"
+	"sort"
+	"strconv"
+	"testing"
+	"time"
+
+	"github.com/go-kit/log"
+	client_testutil "github.com/prometheus/client_golang/prometheus/testutil"
+	"github.com/prometheus/common/model"
+	"github.com/stretchr/testify/require"
+
+	"github.com/prometheus/prometheus/discovery"
+	"github.com/prometheus/prometheus/discovery/targetgroup"
+	"github.com/prometheus/prometheus/util/testutil"
+)
+
+func TestMain(m *testing.M) {
+	testutil.TolerantVerifyLeak(m)
+}
+
+// TestTargetUpdatesOrder checks that the target updates are received in the expected order.
+func TestTargetUpdatesOrder(t *testing.T) {
+
+	// The order by which the updates are sent is determined by the interval passed to the mock discovery adapter.
+	// Final targets array is ordered alphabetically by the name of the discoverer.
+	// For example discoverer "A" with targets "t2,t3" and discoverer "B" with targets "t1,t2" will result in "t2,t3,t1,t2" after the merge.
+	testCases := []struct {
+		title           string
+		updates         map[string][]update
+		expectedTargets [][]*targetgroup.Group
+	}{
+		{
+			title: "Single TP no updates",
+			updates: map[string][]update{
+				"tp1": {},
+			},
+			expectedTargets: nil,
+		},
+		{
+			title: "Multiple TPs no updates",
+			updates: map[string][]update{
+				"tp1": {},
+				"tp2": {},
+				"tp3": {},
+			},
+			expectedTargets: nil,
+		},
+		{
+			title: "Single TP empty initials",
+			updates: map[string][]update{
+				"tp1": {
+					{
+						targetGroups: []targetgroup.Group{},
+						interval:     5 * time.Millisecond,
+					},
+				},
+			},
+			expectedTargets: [][]*targetgroup.Group{
+				{},
+			},
+		},
+		{
+			title: "Multiple TPs empty initials",
+			updates: map[string][]update{
+				"tp1": {
+					{
+						targetGroups: []targetgroup.Group{},
+						interval:     5 * time.Millisecond,
+					},
+				},
+				"tp2": {
+					{
+						targetGroups: []targetgroup.Group{},
+						interval:     200 * time.Millisecond,
+					},
+				},
+				"tp3": {
+					{
+						targetGroups: []targetgroup.Group{},
+						interval:     100 * time.Millisecond,
+					},
+				},
+			},
+			expectedTargets: [][]*targetgroup.Group{
+				{},
+				{},
+				{},
+			},
+		},
+		{
+			title: "Single TP initials only",
+			updates: map[string][]update{
+				"tp1": {
+					{
+						targetGroups: []targetgroup.Group{
+							{
+								Source:  "tp1_group1",
+								Targets: []model.LabelSet{{"__instance__": "1"}},
+							},
+							{
+								Source:  "tp1_group2",
+								Targets: []model.LabelSet{{"__instance__": "2"}},
+							}},
+					},
+				},
+			},
+			expectedTargets: [][]*targetgroup.Group{
+				{
+					{
+						Source:  "tp1_group1",
+						Targets: []model.LabelSet{{"__instance__": "1"}},
+					},
+					{
+						Source:  "tp1_group2",
+						Targets: []model.LabelSet{{"__instance__": "2"}},
+					},
+				},
+			},
+		},
+		{
+			title: "Multiple TPs initials only",
+			updates: map[string][]update{
+				"tp1": {
+					{
+						targetGroups: []targetgroup.Group{
+							{
+								Source:  "tp1_group1",
+								Targets: []model.LabelSet{{"__instance__": "1"}},
+							},
+							{
+								Source:  "tp1_group2",
+								Targets: []model.LabelSet{{"__instance__": "2"}},
+							},
+						},
+					},
+				},
+				"tp2": {
+					{
+						targetGroups: []targetgroup.Group{
+							{
+								Source:  "tp2_group1",
+								Targets: []model.LabelSet{{"__instance__": "3"}},
+							},
+						},
+						interval: 10 * time.Millisecond,
+					},
+				},
+			},
+			expectedTargets: [][]*targetgroup.Group{
+				{
+					{
+						Source:  "tp1_group1",
+						Targets: []model.LabelSet{{"__instance__": "1"}},
+					},
+					{
+						Source:  "tp1_group2",
+						Targets: []model.LabelSet{{"__instance__": "2"}},
+					},
+				}, {
+					{
+						Source:  "tp1_group1",
+						Targets: []model.LabelSet{{"__instance__": "1"}},
+					},
+					{
+						Source:  "tp1_group2",
+						Targets: []model.LabelSet{{"__instance__": "2"}},
+					},
+					{
+						Source: "tp2_group1",
+						Targets: []model.LabelSet{{"__instance__": "3"}},
+					},
+				},
+			},
+		},
+		{
+			title: "Single TP initials followed by empty updates",
+			updates: map[string][]update{
+				"tp1": {
+					{
+						targetGroups: []targetgroup.Group{
+							{
+								Source:  "tp1_group1",
+								Targets: []model.LabelSet{{"__instance__": "1"}},
+							},
+							{
+								Source:  "tp1_group2",
+								Targets: []model.LabelSet{{"__instance__": "2"}},
+							},
+						},
+						interval: 0,
+					},
+					{
+						targetGroups: []targetgroup.Group{
+							{
+								Source:  "tp1_group1",
+								Targets: []model.LabelSet{},
+							},
+							{
+								Source:  "tp1_group2",
+								Targets: []model.LabelSet{},
+							},
+						},
+						interval: 10 * time.Millisecond,
+					},
+				},
+			},
+			expectedTargets: [][]*targetgroup.Group{
+				{
+					{
+						Source:  "tp1_group1",
+						Targets: []model.LabelSet{{"__instance__": "1"}},
+					},
+					{
+						Source:  "tp1_group2",
+						Targets: []model.LabelSet{{"__instance__": "2"}},
+					},
+				},
+				{
+					{
+						Source:  "tp1_group1",
+						Targets: []model.LabelSet{},
+					},
+					{
+						Source:  "tp1_group2",
+						Targets: []model.LabelSet{},
+					},
+				},
+			},
+		},
+		{
+			title: "Single TP initials and new groups",
+			updates: map[string][]update{
+				"tp1": {
+					{
+						targetGroups: []targetgroup.Group{
+							{
+								Source:  "tp1_group1",
+								Targets: []model.LabelSet{{"__instance__": "1"}},
+							},
+							{
+								Source:  "tp1_group2",
+								Targets: []model.LabelSet{{"__instance__": "2"}},
+							},
+						},
+						interval: 0,
+					},
+					{
+						targetGroups: []targetgroup.Group{
+							{
+								Source:  "tp1_group1",
+								Targets: []model.LabelSet{{"__instance__": "3"}},
+							},
+							{
+								Source:  "tp1_group2",
+								Targets: []model.LabelSet{{"__instance__": "4"}},
+							},
+							{
+								Source:  "tp1_group3",
+								Targets: []model.LabelSet{{"__instance__": "1"}},
+							},
+						},
+						interval: 10 * time.Millisecond,
+					},
+				},
+			},
+			expectedTargets: [][]*targetgroup.Group{
+				{
+					{
+						Source:  "tp1_group1",
+						Targets: []model.LabelSet{{"__instance__": "1"}},
+					},
+					{
+						Source:  "tp1_group2",
+						Targets: []model.LabelSet{{"__instance__": "2"}},
+					},
+				},
+				{
+					{
+						Source:  "tp1_group1",
+						Targets: []model.LabelSet{{"__instance__": "3"}},
+					},
+					{
+						Source:  "tp1_group2",
+						Targets: []model.LabelSet{{"__instance__": "4"}},
+					},
+					{
+						Source:  "tp1_group3",
+						Targets: []model.LabelSet{{"__instance__": "1"}},
+					},
+				},
+			},
+		},
+		{
+			title: "Multiple TPs initials and new groups",
+			updates: map[string][]update{
+				"tp1": {
+					{
+						targetGroups: []targetgroup.Group{
+							{
+								Source:  "tp1_group1",
+								Targets: []model.LabelSet{{"__instance__": "1"}},
+							},
+							{
+								Source:  "tp1_group2",
+								Targets: []model.LabelSet{{"__instance__": "2"}},
+							},
+						},
+						interval: 10 * time.Millisecond,
+					},
+					{
+						targetGroups: []targetgroup.Group{
+							{
+								Source:  "tp1_group3",
+								Targets: []model.LabelSet{{"__instance__": "3"}},
+							},
+							{
+								Source:  "tp1_group4",
+								Targets: []model.LabelSet{{"__instance__": "4"}},
+							},
+						},
+						interval: 500 * time.Millisecond,
+					},
+				},
+				"tp2": {
+					{
+						targetGroups: []targetgroup.Group{
+							{
+								Source:  "tp2_group1",
+								Targets: []model.LabelSet{{"__instance__": "5"}},
+							},
+							{
+								Source:  "tp2_group2",
+								Targets: []model.LabelSet{{"__instance__": "6"}},
+							},
+						},
+						interval: 100 * time.Millisecond,
+					},
+					{
+						targetGroups: []targetgroup.Group{
+							{
+								Source:  "tp2_group3",
+								Targets: []model.LabelSet{{"__instance__": "7"}},
+							},
+							{
+								Source:  "tp2_group4",
+								Targets: []model.LabelSet{{"__instance__": "8"}},
+							},
+						},
+						interval: 10 * time.Millisecond,
+					},
+				},
+			},
+			expectedTargets: [][]*targetgroup.Group{
+				{
+					{
+						Source:  "tp1_group1",
+						Targets: []model.LabelSet{{"__instance__": "1"}},
+					},
+					{
+						Source:  "tp1_group2",
+						Targets: []model.LabelSet{{"__instance__": "2"}},
+					},
+				},
+				{
+					{
+						Source:  "tp1_group1",
+						Targets: []model.LabelSet{{"__instance__": "1"}},
+					},
+					{
+						Source:  "tp1_group2",
+						Targets: []model.LabelSet{{"__instance__": "2"}},
+					},
+					{
+						Source:  "tp2_group1",
+						Targets: []model.LabelSet{{"__instance__": "5"}},
+					},
+					{
+						Source:  "tp2_group2",
+						Targets: []model.LabelSet{{"__instance__": "6"}},
+					},
+				},
+				{
+					{
+						Source:  "tp1_group1",
+						Targets: []model.LabelSet{{"__instance__": "1"}},
+					},
+					{
+						Source:  "tp1_group2",
+						Targets: []model.LabelSet{{"__instance__": "2"}},
+					},
+					{
+						Source:  "tp2_group1",
+						Targets: []model.LabelSet{{"__instance__": "5"}},
+					},
+					{
+						Source:  "tp2_group2",
+						Targets: []model.LabelSet{{"__instance__": "6"}},
+					},
+					{
+						Source:  "tp2_group3",
+						Targets: []model.LabelSet{{"__instance__": "7"}},
+					},
+					{
+						Source:  "tp2_group4",
+						Targets: []model.LabelSet{{"__instance__": "8"}},
+					},
+				},
+				{
+					{
+						Source:  "tp1_group1",
+						Targets: []model.LabelSet{{"__instance__": "1"}},
+					},
+					{
+						Source:  "tp1_group2",
+						Targets: []model.LabelSet{{"__instance__": "2"}},
+					},
+					{
+						Source:  "tp1_group3",
+						Targets: []model.LabelSet{{"__instance__": "3"}},
+					},
+					{
+						Source:  "tp1_group4",
+						Targets: []model.LabelSet{{"__instance__": "4"}},
+					},
+					{
+						Source:  "tp2_group1",
+						Targets: []model.LabelSet{{"__instance__": "5"}},
+					},
+					{
+						Source:  "tp2_group2",
+						Targets: []model.LabelSet{{"__instance__": "6"}},
+					},
+					{
+						Source:  "tp2_group3",
+						Targets: []model.LabelSet{{"__instance__": "7"}},
+					},
+					{
+						Source:  "tp2_group4",
+						Targets: []model.LabelSet{{"__instance__": "8"}},
+					},
+				},
+			},
+		},
+		{
+			title: "One TP initials arrive after other TP updates.",
+			updates: map[string][]update{
+				"tp1": {
+					{
+						targetGroups: []targetgroup.Group{
+							{
+								Source:  "tp1_group1",
+								Targets: []model.LabelSet{{"__instance__": "1"}},
+							},
+							{
+								Source:  "tp1_group2",
+								Targets: []model.LabelSet{{"__instance__": "2"}},
+							},
+						},
+						interval: 10 * time.Millisecond,
+					},
+					{
+						targetGroups: []targetgroup.Group{
+							{
+								Source:  "tp1_group1",
+								Targets: []model.LabelSet{{"__instance__": "3"}},
+							},
+							{
+								Source:  "tp1_group2",
+								Targets: []model.LabelSet{{"__instance__": "4"}},
+							},
+						},
+						interval: 150 * time.Millisecond,
+					},
+				},
+				"tp2": {
+					{
+						targetGroups: []targetgroup.Group{
+							{
+								Source:  "tp2_group1",
+								Targets: []model.LabelSet{{"__instance__": "5"}},
+							},
+							{
+								Source:  "tp2_group2",
+								Targets: []model.LabelSet{{"__instance__": "6"}},
+							},
+						},
+						interval: 200 * time.Millisecond,
+					},
+					{
+						targetGroups: []targetgroup.Group{
+							{
+								Source:  "tp2_group1",
+								Targets: []model.LabelSet{{"__instance__": "7"}},
+							},
+							{
+								Source:  "tp2_group2",
+								Targets: []model.LabelSet{{"__instance__": "8"}},
+							},
+						},
+						interval: 100 * time.Millisecond,
+					},
+				},
+			},
+			expectedTargets: [][]*targetgroup.Group{
+				{
+					{
+						Source:  "tp1_group1",
+						Targets: []model.LabelSet{{"__instance__": "1"}},
+					},
+					{
+						Source:  "tp1_group2",
+						Targets: []model.LabelSet{{"__instance__": "2"}},
+					},
+				},
+				{
+					{
+						Source:  "tp1_group1",
+						Targets: []model.LabelSet{{"__instance__": "3"}},
+					},
+					{
+						Source:  "tp1_group2",
+						Targets: []model.LabelSet{{"__instance__": "4"}},
+					},
+				},
+				{
+					{
+						Source:  "tp1_group1",
+						Targets: []model.LabelSet{{"__instance__": "3"}},
+					},
+					{
+						Source:  "tp1_group2",
+						Targets: []model.LabelSet{{"__instance__": "4"}},
+					},
+					{
+						Source:  "tp2_group1",
+						Targets: []model.LabelSet{{"__instance__": "5"}},
+					},
+					{
+						Source:  "tp2_group2",
+						Targets: []model.LabelSet{{"__instance__": "6"}},
+					},
+				},
+				{
+					{
+						Source:  "tp1_group1",
+						Targets: []model.LabelSet{{"__instance__": "3"}},
+					},
+					{
+						Source:  "tp1_group2",
+						Targets: []model.LabelSet{{"__instance__": "4"}},
+					},
+					{
+						Source:  "tp2_group1",
+						Targets: []model.LabelSet{{"__instance__": "7"}},
+					},
+					{
+						Source:  "tp2_group2",
+						Targets: []model.LabelSet{{"__instance__": "8"}},
+					},
+				},
+			},
+		},
+
+		{
+			title: "Single TP empty update in between",
+			updates: map[string][]update{
+				"tp1": {
+					{
+						targetGroups: []targetgroup.Group{
+							{
+								Source:  "tp1_group1",
+								Targets: []model.LabelSet{{"__instance__": "1"}},
+							},
+							{
+								Source:  "tp1_group2",
+								Targets: []model.LabelSet{{"__instance__": "2"}},
+							},
+						},
+						interval: 30 * time.Millisecond,
+					},
+					{
+						targetGroups: []targetgroup.Group{
+							{
+								Source:  "tp1_group1",
+								Targets: []model.LabelSet{},
+							},
+							{
+								Source:  "tp1_group2",
+								Targets: []model.LabelSet{},
+							},
+						},
+						interval: 10 * time.Millisecond,
+					},
+					{
+						targetGroups: []targetgroup.Group{
+							{
+								Source:  "tp1_group1",
+								Targets: []model.LabelSet{{"__instance__": "3"}},
+							},
+							{
+								Source:  "tp1_group2",
+								Targets: []model.LabelSet{{"__instance__": "4"}},
+							},
+						},
+						interval: 300 * time.Millisecond,
+					},
+				},
+			},
+			expectedTargets: [][]*targetgroup.Group{
+				{
+					{
+						Source:  "tp1_group1",
+						Targets: []model.LabelSet{{"__instance__": "1"}},
+					},
+					{
+						Source:  "tp1_group2",
+						Targets: []model.LabelSet{{"__instance__": "2"}},
+					},
+				},
+				{
+					{
+						Source:  "tp1_group1",
+						Targets: []model.LabelSet{},
+					},
+					{
+						Source:  "tp1_group2",
+						Targets: []model.LabelSet{},
+					},
+				},
+				{
+					{
+						Source:  "tp1_group1",
+						Targets: []model.LabelSet{{"__instance__": "3"}},
+					},
+					{
+						Source:  "tp1_group2",
+						Targets: []model.LabelSet{{"__instance__": "4"}},
+					},
+				},
+			},
+		},
+	}
+
+	for i, tc := range testCases {
+		tc := tc
+		t.Run(tc.title, func(t *testing.T) {
+			ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+			defer cancel()
+
+			discoveryManager := NewManager(ctx, log.NewNopLogger())
+			discoveryManager.updatert = 100 * time.Millisecond
+
+			var totalUpdatesCount int
+			provUpdates := make(chan []*targetgroup.Group)
+			for _, up := range tc.updates {
+				go newMockDiscoveryProvider(up...).Run(ctx, provUpdates)
+				if len(up) > 0 {
+					totalUpdatesCount += len(up)
+				}
+			}
+
+			for x := 0; x < totalUpdatesCount; x++ {
+				select {
+				case <-ctx.Done():
+					t.Fatalf("%d: no update arrived within the timeout limit", x)
+				case tgs := <-provUpdates:
+					discoveryManager.updateGroup(poolKey{setName: strconv.Itoa(i), provider: tc.title}, tgs)
+					for _, got := range discoveryManager.allGroups() {
+						assertEqualGroups(t, got, tc.expectedTargets[x], func(got, expected string) string {
+							return fmt.Sprintf("%d: \ntargets mismatch \ngot: %v \nexpected: %v",
+								x,
+								got,
+								expected)
+						})
+					}
+				}
+			}
+		})
+	}
+}
+
+func assertEqualGroups(t *testing.T, got, expected []*targetgroup.Group, msg func(got, expected string) string) {
+	t.Helper()
+
+	// Need to sort by the group's source as the received order is not guaranteed.
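+	// Sorting both slices in place gives require.Equal a deterministic order to compare.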
+	sort.Sort(byGroupSource(got))
+	sort.Sort(byGroupSource(expected))
+
+	require.Equal(t, expected, got)
+}
+
+func staticConfig(addrs ...string) discovery.StaticConfig {
+	var cfg discovery.StaticConfig
+	for i, addr := range addrs {
+		cfg = append(cfg, &targetgroup.Group{
+			Source: fmt.Sprint(i),
+			Targets: []model.LabelSet{
+				{model.AddressLabel: model.LabelValue(addr)},
+			},
+		})
+	}
+	return cfg
+}
+
+func verifyPresence(t *testing.T, tSets map[poolKey]map[string]*targetgroup.Group, poolKey poolKey, label string, present bool) {
+	t.Helper()
+	if _, ok := tSets[poolKey]; !ok {
+		t.Fatalf("'%s' should be present in Pool keys: %v", poolKey, tSets)
+		return
+	}
+
+	match := false
+	var mergedTargets string
+	for _, targetGroup := range tSets[poolKey] {
+
+		for _, l := range targetGroup.Targets {
+			mergedTargets = mergedTargets + " " + l.String()
+			if l.String() == label {
+				match = true
+			}
+		}
+
+	}
+	if match != present {
+		msg := ""
+		if !present {
+			msg = "not"
+		}
+		t.Fatalf("%q should %s be present in Targets labels: %q", label, msg, mergedTargets)
+	}
+}
+
+func TestTargetSetRecreatesTargetGroupsEveryRun(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	discoveryManager := NewManager(ctx, log.NewNopLogger())
+	discoveryManager.updatert = 100 * time.Millisecond
+	go discoveryManager.Run()
+
+	c := map[string]discovery.Configs{
+		"prometheus": {
+			staticConfig("foo:9090", "bar:9090"),
+		},
+	}
+	discoveryManager.ApplyConfig(c)
+
+	<-discoveryManager.SyncCh()
+	verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/0"}, "{__address__=\"foo:9090\"}", true)
+	verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/0"}, "{__address__=\"bar:9090\"}", true)
+
+	c["prometheus"] = discovery.Configs{
+		staticConfig("foo:9090"),
+	}
+	discoveryManager.ApplyConfig(c)
+
+	<-discoveryManager.SyncCh()
+	verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/0"}, "{__address__=\"foo:9090\"}", true)
+	verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/0"}, "{__address__=\"bar:9090\"}", false)
+}
+
+func TestDiscovererConfigs(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	discoveryManager := NewManager(ctx, log.NewNopLogger())
+	discoveryManager.updatert = 100 * time.Millisecond
+	go discoveryManager.Run()
+
+	c := map[string]discovery.Configs{
+		"prometheus": {
+			staticConfig("foo:9090", "bar:9090"),
+			staticConfig("baz:9090"),
+		},
+	}
+	discoveryManager.ApplyConfig(c)
+
+	<-discoveryManager.SyncCh()
+	verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/0"}, "{__address__=\"foo:9090\"}", true)
+	verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/0"}, "{__address__=\"bar:9090\"}", true)
+	verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/1"}, "{__address__=\"baz:9090\"}", true)
+}
+
+// TestTargetSetRecreatesEmptyStaticConfigs ensures that reloading a config file after
+// removing all targets from the static_configs sends an update with empty targetGroups.
+// This is required to signal the receiver that this target set has no current targets.
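+// (This mirrors the fallback in registerProviders, which adds an empty
+// discovery.StaticConfig{{}} when a target set has no SD configurations.)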
+func TestTargetSetRecreatesEmptyStaticConfigs(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	discoveryManager := NewManager(ctx, log.NewNopLogger())
+	discoveryManager.updatert = 100 * time.Millisecond
+	go discoveryManager.Run()
+
+	c := map[string]discovery.Configs{
+		"prometheus": {
+			staticConfig("foo:9090"),
+		},
+	}
+	discoveryManager.ApplyConfig(c)
+
+	<-discoveryManager.SyncCh()
+	verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/0"}, "{__address__=\"foo:9090\"}", true)
+
+	c["prometheus"] = discovery.Configs{
+		discovery.StaticConfig{{}},
+	}
+	discoveryManager.ApplyConfig(c)
+
+	<-discoveryManager.SyncCh()
+
+	pkey := poolKey{setName: "prometheus", provider: "static/0"}
+	targetGroups, ok := discoveryManager.targets[pkey]
+	if !ok {
+		t.Fatalf("'%v' should be present in target groups", pkey)
+	}
+	group, ok := targetGroups[""]
+	if !ok {
+		t.Fatalf("missing '' key in target groups %v", targetGroups)
+	}
+
+	if len(group.Targets) != 0 {
+		t.Fatalf("Invalid number of targets: expected 0, got %d", len(group.Targets))
+	}
+}
+
+func TestIdenticalConfigurationsAreCoalesced(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	discoveryManager := NewManager(ctx, nil)
+	discoveryManager.updatert = 100 * time.Millisecond
+	go discoveryManager.Run()
+
+	c := map[string]discovery.Configs{
+		"prometheus": {
+			staticConfig("foo:9090"),
+		},
+		"prometheus2": {
+			staticConfig("foo:9090"),
+		},
+	}
+	discoveryManager.ApplyConfig(c)
+
+	<-discoveryManager.SyncCh()
+	verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/0"}, "{__address__=\"foo:9090\"}", true)
+	verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus2", provider: "static/0"}, "{__address__=\"foo:9090\"}", true)
+	if len(discoveryManager.providers) != 1 {
+		t.Fatalf("Invalid number of providers: expected 1, got %d", len(discoveryManager.providers))
+	}
+}
+
+func TestApplyConfigDoesNotModifyStaticTargets(t *testing.T) {
+	originalConfig := discovery.Configs{
+		staticConfig("foo:9090", "bar:9090", "baz:9090"),
+	}
+	processedConfig := discovery.Configs{
+		staticConfig("foo:9090", "bar:9090", "baz:9090"),
+	}
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	discoveryManager := NewManager(ctx, log.NewNopLogger())
+	discoveryManager.updatert = 100 * time.Millisecond
+	go discoveryManager.Run()
+
+	cfgs := map[string]discovery.Configs{
+		"prometheus": processedConfig,
+	}
+	discoveryManager.ApplyConfig(cfgs)
+	<-discoveryManager.SyncCh()
+
+	for _, cfg := range cfgs {
+		require.Equal(t, originalConfig, cfg)
+	}
+}
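+
+// errorConfig is a stub discovery.Config whose NewDiscoverer always fails with
+// the wrapped error; TestGaugeFailedConfigs below uses it to drive the
+// prometheus_sd_failed_configs gauge.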
+type errorConfig struct{ err error }
+
+func (e errorConfig) Name() string { return "error" }
+func (e errorConfig) NewDiscoverer(discovery.DiscovererOptions) (discovery.Discoverer, error) {
+	return nil, e.err
+}
+
+func TestGaugeFailedConfigs(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	discoveryManager := NewManager(ctx, log.NewNopLogger())
+	discoveryManager.updatert = 100 * time.Millisecond
+	go discoveryManager.Run()
+
+	c := map[string]discovery.Configs{
+		"prometheus": {
+			errorConfig{fmt.Errorf("tests error 0")},
+			errorConfig{fmt.Errorf("tests error 1")},
+			errorConfig{fmt.Errorf("tests error 2")},
+		},
+	}
+	discoveryManager.ApplyConfig(c)
+	<-discoveryManager.SyncCh()
+
+	failedCount := client_testutil.ToFloat64(failedConfigs)
+	if failedCount != 3 {
+		t.Fatalf("Expected to have 3 failed configs, got: %v", failedCount)
+	}
+
+	c["prometheus"] = discovery.Configs{
+		staticConfig("foo:9090"),
+	}
+	discoveryManager.ApplyConfig(c)
+	<-discoveryManager.SyncCh()
+
+	failedCount = client_testutil.ToFloat64(failedConfigs)
+	if failedCount != 0 {
+		t.Fatalf("Expected to get no failed config, got: %v", failedCount)
+	}
+
+}
+
+func TestCoordinationWithReceiver(t *testing.T) {
+	updateDelay := 100 * time.Millisecond
+
+	type expect struct {
+		delay time.Duration
+		tgs   map[string][]*targetgroup.Group
+	}
+
+	testCases := []struct {
+		title     string
+		providers map[string]discovery.Discoverer
+		expected  []expect
+	}{
+		{
+			title: "Receiver should get all updates even when one provider closes its channel",
+			providers: map[string]discovery.Discoverer{
+				"once1": &onceProvider{
+					tgs: []*targetgroup.Group{
+						{
+							Source:  "tg1",
+							Targets: []model.LabelSet{{"__instance__": "1"}},
+						},
+					},
+				},
+				"mock1": newMockDiscoveryProvider(
+					update{
+						interval: 2 * updateDelay,
+						targetGroups: []targetgroup.Group{
+							{
+								Source:  "tg2",
+								Targets: []model.LabelSet{{"__instance__": "2"}},
+							},
+						},
+					},
+				),
+			},
+			expected: []expect{
+				{
+					tgs: map[string][]*targetgroup.Group{
+						"once1": {
+							{
+								Source:  "tg1",
+								Targets: []model.LabelSet{{"__instance__": "1"}},
+							},
+						},
+					},
+				},
+				{
+					tgs: map[string][]*targetgroup.Group{
+						"once1": {
+							{
+								Source:  "tg1",
+								Targets: []model.LabelSet{{"__instance__": "1"}},
+							},
+						},
+						"mock1": {
+							{
+								Source:  "tg2",
+								Targets: []model.LabelSet{{"__instance__": "2"}},
+							},
+						},
+					},
+				},
+			},
+		},
+		{
+			title: "Receiver should get all updates even when the channel is blocked",
+			providers: map[string]discovery.Discoverer{
+				"mock1": newMockDiscoveryProvider(
+					update{
+						targetGroups: []targetgroup.Group{
+							{
+								Source:  "tg1",
+								Targets: []model.LabelSet{{"__instance__": "1"}},
+							},
+						},
+					},
+					update{
+						interval: 4 * updateDelay,
+						targetGroups: []targetgroup.Group{
+							{
+								Source:  "tg2",
+								Targets: []model.LabelSet{{"__instance__": "2"}},
+							},
+						},
+					},
+				),
+			},
+			expected: []expect{
+				{
+					delay: 2 * updateDelay,
+					tgs: map[string][]*targetgroup.Group{
+						"mock1": {
+							{
+								Source:  "tg1",
+								Targets: []model.LabelSet{{"__instance__": "1"}},
+							},
+						},
+					},
+				},
+				{
+					delay: 4 * updateDelay,
+					tgs: map[string][]*targetgroup.Group{
+						"mock1": {
+							{
+								Source:  "tg1",
+								Targets: []model.LabelSet{{"__instance__": "1"}},
+							},
+							{
+								Source:  "tg2",
+								Targets: []model.LabelSet{{"__instance__": "2"}},
+							},
+						},
+					},
+				},
+			},
+		},
+	}
+
+	for _, tc := range testCases {
+		tc := tc
+		t.Run(tc.title, func(t *testing.T) {
+			ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+			defer cancel()
+
+			mgr := NewManager(ctx, nil)
+			mgr.updatert = updateDelay
+			go mgr.Run()
+
+			for name, p := range tc.providers {
+				mgr.StartCustomProvider(ctx, name, p)
+			}
+
+			for i, expected := range tc.expected {
+				time.Sleep(expected.delay)
+				select {
+				case <-ctx.Done():
+					t.Fatalf("step %d: no update received in the expected timeframe", i)
+				case tgs, ok := <-mgr.SyncCh():
+					if !ok {
+						t.Fatalf("step %d: discovery manager channel is closed", i)
+					}
+					if len(tgs) != len(expected.tgs) {
+						t.Fatalf("step %d: target groups mismatch, got: %d, expected: %d\ngot: %#v\nexpected: %#v",
+							i, len(tgs), len(expected.tgs), tgs, expected.tgs)
+					}
+					for k := range expected.tgs {
+						if _, ok := tgs[k]; !ok {
+							t.Fatalf("step %d: target group not found: %s\ngot: %#v", i, k, tgs)
+						}
+						assertEqualGroups(t, tgs[k], expected.tgs[k], func(got, expected string) string {
+							return fmt.Sprintf("step %d: targets mismatch \ngot: %q \nexpected: %q", i, got, expected)
+						})
+					}
+				}
+			}
+		})
+	}
+}
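+
+// update holds one batch of target groups that a mock discovery provider
+// emits after waiting for the given interval.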
+type update struct {
+	targetGroups []targetgroup.Group
+	interval     time.Duration
+}
+
+type mockdiscoveryProvider struct {
+	updates []update
+}
+
+func newMockDiscoveryProvider(updates ...update) mockdiscoveryProvider {
+	tp := mockdiscoveryProvider{
+		updates: updates,
+	}
+	return tp
+}
+
+func (tp mockdiscoveryProvider) Run(ctx context.Context, upCh chan<- []*targetgroup.Group) {
+	for _, u := range tp.updates {
+		if u.interval > 0 {
+			select {
+			case <-ctx.Done():
+				return
+			case <-time.After(u.interval):
+			}
+		}
+		tgs := make([]*targetgroup.Group, len(u.targetGroups))
+		for i := range u.targetGroups {
+			tgs[i] = &u.targetGroups[i]
+		}
+		upCh <- tgs
+	}
+	<-ctx.Done()
+}
+
+// byGroupSource implements sort.Interface so we can sort by the Source field.
+type byGroupSource []*targetgroup.Group
+
+func (a byGroupSource) Len() int           { return len(a) }
+func (a byGroupSource) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
+func (a byGroupSource) Less(i, j int) bool { return a[i].Source < a[j].Source }
+
+// onceProvider sends updates once (if any) and closes the update channel.
+type onceProvider struct {
+	tgs []*targetgroup.Group
+}
+
+func (o onceProvider) Run(_ context.Context, ch chan<- []*targetgroup.Group) {
+	if len(o.tgs) > 0 {
+		ch <- o.tgs
+	}
+	close(ch)
+}
diff --git a/discovery/legacymanager/registry.go b/discovery/legacymanager/registry.go
new file mode 100644
index 0000000000..fb01e16488
--- /dev/null
+++ b/discovery/legacymanager/registry.go
@@ -0,0 +1,259 @@
+// Copyright 2020 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package legacymanager
+
+import (
+	"fmt"
+	"reflect"
+	"sort"
+	"strconv"
+	"strings"
+	"sync"
+
+	"gopkg.in/yaml.v2"
+
+	"github.com/prometheus/prometheus/discovery"
+	"github.com/prometheus/prometheus/discovery/targetgroup"
+)
+
+const (
+	configFieldPrefix      = "AUTO_DISCOVERY_"
+	staticConfigsKey       = "static_configs"
+	staticConfigsFieldName = configFieldPrefix + staticConfigsKey
+)
+
+var (
+	configNames      = make(map[string]discovery.Config)
+	configFieldNames = make(map[reflect.Type]string)
+	configFields     []reflect.StructField
+
+	configTypesMu sync.Mutex
+	configTypes   = make(map[reflect.Type]reflect.Type)
+
+	emptyStructType = reflect.TypeOf(struct{}{})
+	configsType     = reflect.TypeOf(discovery.Configs{})
+)
+
+// RegisterConfig registers the given Config type for YAML marshaling and unmarshaling.
+func RegisterConfig(config discovery.Config) {
+	registerConfig(config.Name()+"_sd_configs", reflect.TypeOf(config), config)
+}
+
+func init() {
+	// N.B.: static_configs is the only Config type implemented by default.
+	// All other types are registered at init by their implementing packages.
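+	// Registering *targetgroup.Group as the element type makes the static_configs
+	// YAML key decode directly into a slice of target groups.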
+	elemTyp := reflect.TypeOf(&targetgroup.Group{})
+	registerConfig(staticConfigsKey, elemTyp, discovery.StaticConfig{})
+}
+
+func registerConfig(yamlKey string, elemType reflect.Type, config discovery.Config) {
+	name := config.Name()
+	if _, ok := configNames[name]; ok {
+		panic(fmt.Sprintf("discovery: Config named %q is already registered", name))
+	}
+	configNames[name] = config
+
+	fieldName := configFieldPrefix + yamlKey // Field must be exported.
+	configFieldNames[elemType] = fieldName
+
+	// Insert fields in sorted order.
+	i := sort.Search(len(configFields), func(k int) bool {
+		return fieldName < configFields[k].Name
+	})
+	configFields = append(configFields, reflect.StructField{}) // Add empty field at end.
+	copy(configFields[i+1:], configFields[i:])                 // Shift fields to the right.
+	configFields[i] = reflect.StructField{                     // Write new field in place.
+		Name: fieldName,
+		Type: reflect.SliceOf(elemType),
+		Tag:  reflect.StructTag(`yaml:"` + yamlKey + `,omitempty"`),
+	}
+}
+
+func getConfigType(out reflect.Type) reflect.Type {
+	configTypesMu.Lock()
+	defer configTypesMu.Unlock()
+	if typ, ok := configTypes[out]; ok {
+		return typ
+	}
+	// Initial exported fields map one-to-one.
+	var fields []reflect.StructField
+	for i, n := 0, out.NumField(); i < n; i++ {
+		switch field := out.Field(i); {
+		case field.PkgPath == "" && field.Type != configsType:
+			fields = append(fields, field)
+		default:
+			fields = append(fields, reflect.StructField{
+				Name:    "_" + field.Name, // Field must be unexported.
+				PkgPath: out.PkgPath(),
+				Type:    emptyStructType,
+			})
+		}
+	}
+	// Append extra config fields on the end.
+	fields = append(fields, configFields...)
+	typ := reflect.StructOf(fields)
+	configTypes[out] = typ
+	return typ
+}
+
+// UnmarshalYAMLWithInlineConfigs helps implement yaml.Unmarshal for structs
+// that have a Configs field that should be inlined.
+func UnmarshalYAMLWithInlineConfigs(out interface{}, unmarshal func(interface{}) error) error {
+	outVal := reflect.ValueOf(out)
+	if outVal.Kind() != reflect.Ptr {
+		return fmt.Errorf("discovery: can only unmarshal into a struct pointer: %T", out)
+	}
+	outVal = outVal.Elem()
+	if outVal.Kind() != reflect.Struct {
+		return fmt.Errorf("discovery: can only unmarshal into a struct pointer: %T", out)
+	}
+	outTyp := outVal.Type()
+
+	cfgTyp := getConfigType(outTyp)
+	cfgPtr := reflect.New(cfgTyp)
+	cfgVal := cfgPtr.Elem()
+
+	// Copy shared fields (defaults) to dynamic value.
+	var configs *discovery.Configs
+	for i, n := 0, outVal.NumField(); i < n; i++ {
+		if outTyp.Field(i).Type == configsType {
+			configs = outVal.Field(i).Addr().Interface().(*discovery.Configs)
+			continue
+		}
+		if cfgTyp.Field(i).PkgPath != "" {
+			continue // Field is unexported: ignore.
+		}
+		cfgVal.Field(i).Set(outVal.Field(i))
+	}
+	if configs == nil {
+		return fmt.Errorf("discovery: Configs field not found in type: %T", out)
+	}
+
+	// Unmarshal into dynamic value.
+	if err := unmarshal(cfgPtr.Interface()); err != nil {
+		return replaceYAMLTypeError(err, cfgTyp, outTyp)
+	}
+
+	// Copy shared fields from dynamic value.
+	for i, n := 0, outVal.NumField(); i < n; i++ {
+		if cfgTyp.Field(i).PkgPath != "" {
+			continue // Field is unexported: ignore.
+		}
+		outVal.Field(i).Set(cfgVal.Field(i))
+	}
+
+	var err error
+	*configs, err = readConfigs(cfgVal, outVal.NumField())
+	return err
+}
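+
+// readConfigs builds a discovery.Configs from the dynamically generated struct,
+// reading the SD slices in field order; all static target groups are coalesced
+// into a single StaticConfig entry.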
+func readConfigs(structVal reflect.Value, startField int) (discovery.Configs, error) {
+	var (
+		configs discovery.Configs
+		targets []*targetgroup.Group
+	)
+	for i, n := startField, structVal.NumField(); i < n; i++ {
+		field := structVal.Field(i)
+		if field.Kind() != reflect.Slice {
+			panic("discovery: internal error: field is not a slice")
+		}
+		for k := 0; k < field.Len(); k++ {
+			val := field.Index(k)
+			if val.IsZero() || (val.Kind() == reflect.Ptr && val.Elem().IsZero()) {
+				key := configFieldNames[field.Type().Elem()]
+				key = strings.TrimPrefix(key, configFieldPrefix)
+				return nil, fmt.Errorf("empty or null section in %s", key)
+			}
+			switch c := val.Interface().(type) {
+			case *targetgroup.Group:
+				// Add index to the static config target groups for unique identification
+				// within scrape pool.
+				c.Source = strconv.Itoa(len(targets))
+				// Coalesce multiple static configs into a single static config.
+				targets = append(targets, c)
+			case discovery.Config:
+				configs = append(configs, c)
+			default:
+				panic("discovery: internal error: slice element is not a Config")
+			}
+		}
+	}
+	if len(targets) > 0 {
+		configs = append(configs, discovery.StaticConfig(targets))
+	}
+	return configs, nil
+}
+
+// MarshalYAMLWithInlineConfigs helps implement yaml.Marshal for structs
+// that have a Configs field that should be inlined.
+func MarshalYAMLWithInlineConfigs(in interface{}) (interface{}, error) {
+	inVal := reflect.ValueOf(in)
+	for inVal.Kind() == reflect.Ptr {
+		inVal = inVal.Elem()
+	}
+	inTyp := inVal.Type()
+
+	cfgTyp := getConfigType(inTyp)
+	cfgPtr := reflect.New(cfgTyp)
+	cfgVal := cfgPtr.Elem()
+
+	// Copy shared fields to dynamic value.
+	var configs *discovery.Configs
+	for i, n := 0, inTyp.NumField(); i < n; i++ {
+		if inTyp.Field(i).Type == configsType {
+			configs = inVal.Field(i).Addr().Interface().(*discovery.Configs)
+		}
+		if cfgTyp.Field(i).PkgPath != "" {
+			continue // Field is unexported: ignore.
+		}
+		cfgVal.Field(i).Set(inVal.Field(i))
+	}
+	if configs == nil {
+		return nil, fmt.Errorf("discovery: Configs field not found in type: %T", in)
+	}
+
+	if err := writeConfigs(cfgVal, *configs); err != nil {
+		return nil, err
+	}
+
+	return cfgPtr.Interface(), nil
+}
+
+func writeConfigs(structVal reflect.Value, configs discovery.Configs) error {
+	targets := structVal.FieldByName(staticConfigsFieldName).Addr().Interface().(*[]*targetgroup.Group)
+	for _, c := range configs {
+		if sc, ok := c.(discovery.StaticConfig); ok {
+			*targets = append(*targets, sc...)
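+			// Every StaticConfig is flattened into the single generated
+			// static_configs field.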
+ continue + } + fieldName, ok := configFieldNames[reflect.TypeOf(c)] + if !ok { + return fmt.Errorf("discovery: cannot marshal unregistered Config type: %T", c) + } + field := structVal.FieldByName(fieldName) + field.Set(reflect.Append(field, reflect.ValueOf(c))) + } + return nil +} + +func replaceYAMLTypeError(err error, oldTyp, newTyp reflect.Type) error { + if e, ok := err.(*yaml.TypeError); ok { + oldStr := oldTyp.String() + newStr := newTyp.String() + for i, s := range e.Errors { + e.Errors[i] = strings.Replace(s, oldStr, newStr, -1) + } + } + return err +} diff --git a/discovery/manager.go b/discovery/manager.go index b3dae5c59f..639a241804 100644 --- a/discovery/manager.go +++ b/discovery/manager.go @@ -65,7 +65,7 @@ var ( ) ) -func init() { +func RegisterMetrics() { prometheus.MustRegister(failedConfigs, discoveredTargets, receivedUpdates, delayedUpdates, sentUpdates) } diff --git a/docs/feature_flags.md b/docs/feature_flags.md index 07424d7318..8a8c9f70d4 100644 --- a/docs/feature_flags.md +++ b/docs/feature_flags.md @@ -1,9 +1,9 @@ --- -title: Feature Flags +title: Feature flags sort_rank: 11 --- -# Feature Flags +# Feature flags Here is a list of features that are disabled by default since they are breaking changes or are considered experimental. Their behaviour can change in future releases which will be communicated via the [release changelog](https://github.com/prometheus/prometheus/blob/main/CHANGELOG.md). @@ -46,7 +46,7 @@ More details can be found [here](querying/basics.md#offset-modifier). The remote write receiver allows Prometheus to accept remote write requests from other Prometheus servers. More details can be found [here](storage.md#overview). -## Exemplars Storage +## Exemplars storage `--enable-feature=exemplar-storage` @@ -54,7 +54,7 @@ The remote write receiver allows Prometheus to accept remote write requests from Exemplar storage is implemented as a fixed size circular buffer that stores exemplars in memory for all series. Enabling this feature will enable the storage of exemplars scraped by Prometheus. The flag `storage.exemplars.exemplars-limit` can be used to control the size of circular buffer by # of exemplars. An exemplar with just a `traceID=` uses roughly 100 bytes of memory via the in-memory exemplar storage. If the exemplar storage is enabled, we will also append the exemplars to WAL for local persistence (for WAL duration). -## Memory Snapshot on Shutdown +## Memory snapshot on shutdown `--enable-feature=memory-snapshot-on-shutdown` @@ -62,7 +62,7 @@ This takes the snapshot of the chunks that are in memory along with the series i it on disk. This will reduce the startup time since the memory state can be restored with this snapshot and m-mapped chunks without the need of WAL replay. -## Extra Scrape Metrics +## Extra scrape metrics `--enable-feature=extra-scrape-metrics` @@ -71,3 +71,17 @@ When enabled, for each instance scrape, Prometheus stores a sample in the follow - `scrape_timeout_seconds`. The configured `scrape_timeout` for a target. This allows you to measure each target to find out how close they are to timing out with `scrape_duration_seconds / scrape_timeout_seconds`. - `scrape_sample_limit`. The configured `sample_limit` for a target. This allows you to measure each target to find out how close they are to reaching the limit with `scrape_samples_post_metric_relabeling / scrape_sample_limit`. 
Note that `scrape_sample_limit` can be zero if there is no limit configured, which means that the query above can return `+Inf` for targets with no limit (as we divide by zero). If you want to query only for targets that do have a sample limit use this query: `scrape_samples_post_metric_relabeling / (scrape_sample_limit > 0)`. + +## New service discovery manager + +`--enable-feature=new-service-discovery-manager` + +When enabled, Prometheus uses a new service discovery manager that does not +restart unchanged discoveries upon reloading. This makes reloads faster and reduces +pressure on service discoveries' sources. + +Users are encouraged to test the new service discovery manager and report any +issues upstream. + +In future releases, this new service discovery manager will become the default and +this feature flag will be ignored. From 1043d2b5949f40c28aee3ab640bab33136c626d7 Mon Sep 17 00:00:00 2001 From: Vladimir Kononov Date: Wed, 20 Oct 2021 12:16:20 +0400 Subject: [PATCH 006/161] Discovery: abstain from restarting providers if possible (#9321) (#9349) * Abstain from restarting discovery providers if possible (#9321) Signed-off-by: Vladimir Kononov --- discovery/manager.go | 187 ++++++++++++++++++------ discovery/manager_test.go | 300 +++++++++++++++++++++++++++++++++++--- 2 files changed, 419 insertions(+), 68 deletions(-) diff --git a/discovery/manager.go b/discovery/manager.go index 639a241804..ad1e5ad064 100644 --- a/discovery/manager.go +++ b/discovery/manager.go @@ -74,12 +74,26 @@ type poolKey struct { provider string } -// provider holds a Discoverer instance, its configuration and its subscribers. +// provider holds a Discoverer instance, its configuration, cancel func and its subscribers. type provider struct { name string d Discoverer - subs []string config interface{} + + cancel context.CancelFunc + // done should be called after cleaning up resources associated with cancelled provider. + done func() + + mu sync.RWMutex + subs map[string]struct{} + + // newSubs is used to temporary store subs to be used upon config reload completion. + newSubs map[string]struct{} +} + +// IsStarted return true if Discoverer is started. +func (p *provider) IsStarted() bool { + return p.cancel != nil } // NewManager is the Discovery Manager constructor. @@ -88,13 +102,12 @@ func NewManager(ctx context.Context, logger log.Logger, options ...func(*Manager logger = log.NewNopLogger() } mgr := &Manager{ - logger: logger, - syncCh: make(chan map[string][]*targetgroup.Group), - targets: make(map[poolKey]map[string]*targetgroup.Group), - discoverCancel: []context.CancelFunc{}, - ctx: ctx, - updatert: 5 * time.Second, - triggerSend: make(chan struct{}, 1), + logger: logger, + syncCh: make(chan map[string][]*targetgroup.Group), + targets: make(map[poolKey]map[string]*targetgroup.Group), + ctx: ctx, + updatert: 5 * time.Second, + triggerSend: make(chan struct{}, 1), } for _, option := range options { option(mgr) @@ -114,15 +127,16 @@ func Name(n string) func(*Manager) { // Manager maintains a set of discovery providers and sends each update to a map channel. // Targets are grouped by the target set name. type Manager struct { - logger log.Logger - name string - mtx sync.RWMutex - ctx context.Context - discoverCancel []context.CancelFunc + logger log.Logger + name string + mtx sync.RWMutex + ctx context.Context - // Some Discoverers(eg. k8s) send only the updates for a given target group + // Some Discoverers(e.g. 
k8s) send only the updates for a given target group, // so we use map[tg.Source]*targetgroup.Group to know which group to update. - targets map[poolKey]map[string]*targetgroup.Group + targets map[poolKey]map[string]*targetgroup.Group + targetsMtx sync.Mutex + // providers keeps track of SD providers. providers []*provider // The sync channel sends the updates as a map where the key is the job value from the scrape config. @@ -132,11 +146,14 @@ type Manager struct { // should only be modified in unit tests. updatert time.Duration - // The triggerSend channel signals to the manager that new updates have been received from providers. + // The triggerSend channel signals to the Manager that new updates have been received from providers. triggerSend chan struct{} + + // lastProvider counts providers registered during Manager's lifetime. + lastProvider uint } -// Run starts the background processing +// Run starts the background processing. func (m *Manager) Run() error { go m.sender() for range m.ctx.Done() { @@ -151,31 +168,80 @@ func (m *Manager) SyncCh() <-chan map[string][]*targetgroup.Group { return m.syncCh } -// ApplyConfig removes all running discovery providers and starts new ones using the provided config. +// ApplyConfig checks if discovery provider with supplied config is already running and keeps them as is. +// Remaining providers are then stopped and new required providers are started using the provided config. func (m *Manager) ApplyConfig(cfg map[string]Configs) error { m.mtx.Lock() defer m.mtx.Unlock() - for pk := range m.targets { - if _, ok := cfg[pk.setName]; !ok { - discoveredTargets.DeleteLabelValues(m.name, pk.setName) - } - } - m.cancelDiscoverers() - m.targets = make(map[poolKey]map[string]*targetgroup.Group) - m.providers = nil - m.discoverCancel = nil - - failedCount := 0 + var failedCount int for name, scfg := range cfg { failedCount += m.registerProviders(scfg, name) - discoveredTargets.WithLabelValues(m.name, name).Set(0) } failedConfigs.WithLabelValues(m.name).Set(float64(failedCount)) + var ( + wg sync.WaitGroup + // keep shows if we keep any providers after reload. + keep bool + newProviders []*provider + ) for _, prov := range m.providers { - m.startProvider(m.ctx, prov) + // Cancel obsolete providers. + if len(prov.newSubs) == 0 { + wg.Add(1) + prov.done = func() { + wg.Done() + } + prov.cancel() + continue + } + newProviders = append(newProviders, prov) + // refTargets keeps reference targets used to populate new subs' targets + var refTargets map[string]*targetgroup.Group + prov.mu.Lock() + for s := range prov.subs { + keep = true + refTargets = m.targets[poolKey{s, prov.name}] + // Remove obsolete subs' targets. + if _, ok := prov.newSubs[s]; !ok { + m.targetsMtx.Lock() + delete(m.targets, poolKey{s, prov.name}) + m.targetsMtx.Unlock() + discoveredTargets.DeleteLabelValues(m.name, s) + } + } + // Set metrics and targets for new subs. + for s := range prov.newSubs { + if _, ok := prov.subs[s]; !ok { + discoveredTargets.WithLabelValues(m.name, s).Set(0) + } + if l := len(refTargets); l > 0 { + m.targets[poolKey{s, prov.name}] = make(map[string]*targetgroup.Group, l) + for k, v := range refTargets { + m.targets[poolKey{s, prov.name}][k] = v + } + } + } + prov.subs = prov.newSubs + prov.newSubs = map[string]struct{}{} + prov.mu.Unlock() + if !prov.IsStarted() { + m.startProvider(m.ctx, prov) + } } + // Currently downstream managers expect full target state upon config reload, so we must oblige. 
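+	// (A full state sync means every subscriber receives its complete set of
+	// target groups, not just a delta.)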
+ // While startProvider does pull the trigger, it may take some time to do so, therefore + // we pull the trigger as soon as possible so that downstream managers can populate their state. + // See https://github.com/prometheus/prometheus/pull/8639 for details. + if keep { + select { + case m.triggerSend <- struct{}{}: + default: + } + } + m.providers = newProviders + wg.Wait() return nil } @@ -185,7 +251,9 @@ func (m *Manager) StartCustomProvider(ctx context.Context, name string, worker D p := &provider{ name: name, d: worker, - subs: []string{name}, + subs: map[string]struct{}{ + name: {}, + }, } m.providers = append(m.providers, p) m.startProvider(ctx, p) @@ -196,13 +264,29 @@ func (m *Manager) startProvider(ctx context.Context, p *provider) { ctx, cancel := context.WithCancel(ctx) updates := make(chan []*targetgroup.Group) - m.discoverCancel = append(m.discoverCancel, cancel) + p.cancel = cancel go p.d.Run(ctx, updates) go m.updater(ctx, p, updates) } +// cleaner cleans resources associated with provider. +func (m *Manager) cleaner(p *provider) { + m.targetsMtx.Lock() + p.mu.RLock() + for s := range p.subs { + delete(m.targets, poolKey{s, p.name}) + } + p.mu.RUnlock() + m.targetsMtx.Unlock() + if p.done != nil { + p.done() + } +} + func (m *Manager) updater(ctx context.Context, p *provider, updates chan []*targetgroup.Group) { + // Ensure targets from this provider are cleaned up. + defer m.cleaner(p) for { select { case <-ctx.Done(): @@ -211,12 +295,16 @@ func (m *Manager) updater(ctx context.Context, p *provider, updates chan []*targ receivedUpdates.WithLabelValues(m.name).Inc() if !ok { level.Debug(m.logger).Log("msg", "Discoverer channel closed", "provider", p.name) + // Wait for provider cancellation to ensure targets are cleaned up when expected. + <-ctx.Done() return } - for _, s := range p.subs { + p.mu.RLock() + for s := range p.subs { m.updateGroup(poolKey{setName: s, provider: p.name}, tgs) } + p.mu.RUnlock() select { case m.triggerSend <- struct{}{}: @@ -234,7 +322,7 @@ func (m *Manager) sender() { select { case <-m.ctx.Done(): return - case <-ticker.C: // Some discoverers send updates too often so we throttle these with the ticker. + case <-ticker.C: // Some discoverers send updates too often, so we throttle these with the ticker. 
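+			// The ticker period is m.updatert (5 seconds by default; unit tests
+			// shorten it).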
select { case <-m.triggerSend: sentUpdates.WithLabelValues(m.name).Inc() @@ -255,14 +343,18 @@ func (m *Manager) sender() { } func (m *Manager) cancelDiscoverers() { - for _, c := range m.discoverCancel { - c() + m.mtx.RLock() + defer m.mtx.RUnlock() + for _, p := range m.providers { + if p.cancel != nil { + p.cancel() + } } } func (m *Manager) updateGroup(poolKey poolKey, tgs []*targetgroup.Group) { - m.mtx.Lock() - defer m.mtx.Unlock() + m.targetsMtx.Lock() + defer m.targetsMtx.Unlock() if _, ok := m.targets[poolKey]; !ok { m.targets[poolKey] = make(map[string]*targetgroup.Group) @@ -275,11 +367,11 @@ func (m *Manager) updateGroup(poolKey poolKey, tgs []*targetgroup.Group) { } func (m *Manager) allGroups() map[string][]*targetgroup.Group { - m.mtx.RLock() - defer m.mtx.RUnlock() - tSets := map[string][]*targetgroup.Group{} n := map[string]int{} + + m.targetsMtx.Lock() + defer m.targetsMtx.Unlock() for pkey, tsets := range m.targets { for _, tg := range tsets { // Even if the target group 'tg' is empty we still need to send it to the 'Scrape manager' @@ -303,7 +395,7 @@ func (m *Manager) registerProviders(cfgs Configs, setName string) int { add := func(cfg Config) { for _, p := range m.providers { if reflect.DeepEqual(cfg, p.config) { - p.subs = append(p.subs, setName) + p.newSubs[setName] = struct{}{} added = true return } @@ -318,11 +410,14 @@ func (m *Manager) registerProviders(cfgs Configs, setName string) int { return } m.providers = append(m.providers, &provider{ - name: fmt.Sprintf("%s/%d", typ, len(m.providers)), + name: fmt.Sprintf("%s/%d", typ, m.lastProvider), d: d, config: cfg, - subs: []string{setName}, + newSubs: map[string]struct{}{ + setName: {}, + }, }) + m.lastProvider++ added = true } for _, cfg := range cfgs { diff --git a/discovery/manager_test.go b/discovery/manager_test.go index 0a438306e4..2caae0080c 100644 --- a/discovery/manager_test.go +++ b/discovery/manager_test.go @@ -18,6 +18,7 @@ import ( "fmt" "sort" "strconv" + "sync" "testing" "time" @@ -718,6 +719,32 @@ func staticConfig(addrs ...string) StaticConfig { return cfg } +func verifySyncedPresence(t *testing.T, tGroups map[string][]*targetgroup.Group, key string, label string, present bool) { + t.Helper() + if _, ok := tGroups[key]; !ok { + t.Fatalf("'%s' should be present in Group map keys: %v", key, tGroups) + return + } + match := false + var mergedTargets string + for _, targetGroups := range tGroups[key] { + for _, l := range targetGroups.Targets { + mergedTargets = mergedTargets + " " + l.String() + if l.String() == label { + match = true + } + } + + } + if match != present { + msg := "" + if !present { + msg = "not" + } + t.Fatalf("%q should %s be present in Group labels: %q", label, msg, mergedTargets) + } +} + func verifyPresence(t *testing.T, tSets map[poolKey]map[string]*targetgroup.Group, poolKey poolKey, label string, present bool) { t.Helper() if _, ok := tSets[poolKey]; !ok { @@ -746,7 +773,180 @@ func verifyPresence(t *testing.T, tSets map[poolKey]map[string]*targetgroup.Grou } } -func TestTargetSetRecreatesTargetGroupsEveryRun(t *testing.T) { +func pk(provider, setName string, n int) poolKey { + return poolKey{ + setName: setName, + provider: fmt.Sprintf("%s/%d", provider, n), + } +} + +func TestTargetSetTargetGroupsPresentOnConfigReload(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + discoveryManager := NewManager(ctx, log.NewNopLogger()) + discoveryManager.updatert = 100 * time.Millisecond + go discoveryManager.Run() + + c := map[string]Configs{ 
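+		// Keys are scrape job names; each value lists the SD configs for that job.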
+ "prometheus": { + staticConfig("foo:9090"), + }, + } + discoveryManager.ApplyConfig(c) + + syncedTargets := <-discoveryManager.SyncCh() + require.Equal(t, 1, len(syncedTargets)) + verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true) + require.Equal(t, 1, len(syncedTargets["prometheus"])) + p := pk("static", "prometheus", 0) + verifyPresence(t, discoveryManager.targets, p, "{__address__=\"foo:9090\"}", true) + require.Equal(t, 1, len(discoveryManager.targets)) + + discoveryManager.ApplyConfig(c) + + syncedTargets = <-discoveryManager.SyncCh() + verifyPresence(t, discoveryManager.targets, p, "{__address__=\"foo:9090\"}", true) + require.Equal(t, 1, len(discoveryManager.targets)) + require.Equal(t, 1, len(syncedTargets)) + verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true) + require.Equal(t, 1, len(syncedTargets["prometheus"])) +} + +func TestTargetSetTargetGroupsPresentOnConfigRename(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + discoveryManager := NewManager(ctx, log.NewNopLogger()) + discoveryManager.updatert = 100 * time.Millisecond + go discoveryManager.Run() + + c := map[string]Configs{ + "prometheus": { + staticConfig("foo:9090"), + }, + } + discoveryManager.ApplyConfig(c) + + syncedTargets := <-discoveryManager.SyncCh() + require.Equal(t, 1, len(syncedTargets)) + verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true) + require.Equal(t, 1, len(syncedTargets["prometheus"])) + p := pk("static", "prometheus", 0) + verifyPresence(t, discoveryManager.targets, p, "{__address__=\"foo:9090\"}", true) + require.Equal(t, 1, len(discoveryManager.targets)) + + c["prometheus2"] = c["prometheus"] + delete(c, "prometheus") + discoveryManager.ApplyConfig(c) + + syncedTargets = <-discoveryManager.SyncCh() + p = pk("static", "prometheus2", 0) + verifyPresence(t, discoveryManager.targets, p, "{__address__=\"foo:9090\"}", true) + require.Equal(t, 1, len(discoveryManager.targets)) + require.Equal(t, 1, len(syncedTargets)) + verifySyncedPresence(t, syncedTargets, "prometheus2", "{__address__=\"foo:9090\"}", true) + require.Equal(t, 1, len(syncedTargets["prometheus2"])) +} + +func TestTargetSetTargetGroupsPresentOnConfigDuplicateAndDeleteOriginal(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + discoveryManager := NewManager(ctx, log.NewNopLogger()) + discoveryManager.updatert = 100 * time.Millisecond + go discoveryManager.Run() + + c := map[string]Configs{ + "prometheus": { + staticConfig("foo:9090"), + }, + } + discoveryManager.ApplyConfig(c) + <-discoveryManager.SyncCh() + + c["prometheus2"] = c["prometheus"] + discoveryManager.ApplyConfig(c) + syncedTargets := <-discoveryManager.SyncCh() + require.Equal(t, 2, len(syncedTargets)) + verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true) + require.Equal(t, 1, len(syncedTargets["prometheus"])) + verifySyncedPresence(t, syncedTargets, "prometheus2", "{__address__=\"foo:9090\"}", true) + require.Equal(t, 1, len(syncedTargets["prometheus2"])) + p := pk("static", "prometheus", 0) + verifyPresence(t, discoveryManager.targets, p, "{__address__=\"foo:9090\"}", true) + require.Equal(t, 2, len(discoveryManager.targets)) + + delete(c, "prometheus") + discoveryManager.ApplyConfig(c) + syncedTargets = <-discoveryManager.SyncCh() + p = pk("static", "prometheus2", 0) + verifyPresence(t, discoveryManager.targets, p, 
"{__address__=\"foo:9090\"}", true) + require.Equal(t, 1, len(discoveryManager.targets)) + require.Equal(t, 1, len(syncedTargets)) + verifySyncedPresence(t, syncedTargets, "prometheus2", "{__address__=\"foo:9090\"}", true) + require.Equal(t, 1, len(syncedTargets["prometheus2"])) +} + +func TestTargetSetTargetGroupsPresentOnConfigChange(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + discoveryManager := NewManager(ctx, log.NewNopLogger()) + discoveryManager.updatert = 100 * time.Millisecond + go discoveryManager.Run() + + c := map[string]Configs{ + "prometheus": { + staticConfig("foo:9090"), + }, + } + discoveryManager.ApplyConfig(c) + + syncedTargets := <-discoveryManager.SyncCh() + require.Equal(t, 1, len(syncedTargets)) + verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true) + require.Equal(t, 1, len(syncedTargets["prometheus"])) + + var mu sync.Mutex + c["prometheus2"] = Configs{ + lockStaticConfig{ + mu: &mu, + config: staticConfig("bar:9090"), + }, + } + mu.Lock() + discoveryManager.ApplyConfig(c) + + // Original targets should be present as soon as possible. + syncedTargets = <-discoveryManager.SyncCh() + mu.Unlock() + require.Equal(t, 1, len(syncedTargets)) + verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true) + require.Equal(t, 1, len(syncedTargets["prometheus"])) + + // prometheus2 configs should be ready on second sync. + syncedTargets = <-discoveryManager.SyncCh() + require.Equal(t, 2, len(syncedTargets)) + verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true) + require.Equal(t, 1, len(syncedTargets["prometheus"])) + verifySyncedPresence(t, syncedTargets, "prometheus2", "{__address__=\"bar:9090\"}", true) + require.Equal(t, 1, len(syncedTargets["prometheus2"])) + + p := pk("static", "prometheus", 0) + verifyPresence(t, discoveryManager.targets, p, "{__address__=\"foo:9090\"}", true) + p = pk("lockstatic", "prometheus2", 1) + verifyPresence(t, discoveryManager.targets, p, "{__address__=\"bar:9090\"}", true) + require.Equal(t, 2, len(discoveryManager.targets)) + + // Delete part of config and ensure only original targets exist. 
+ delete(c, "prometheus2") + discoveryManager.ApplyConfig(c) + syncedTargets = <-discoveryManager.SyncCh() + require.Equal(t, 1, len(discoveryManager.targets)) + verifyPresence(t, discoveryManager.targets, pk("static", "prometheus", 0), "{__address__=\"foo:9090\"}", true) + require.Equal(t, 1, len(syncedTargets)) + verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true) + require.Equal(t, 1, len(syncedTargets["prometheus"])) +} + +func TestTargetSetRecreatesTargetGroupsOnConfigChange(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() discoveryManager := NewManager(ctx, log.NewNopLogger()) @@ -760,18 +960,29 @@ func TestTargetSetRecreatesTargetGroupsEveryRun(t *testing.T) { } discoveryManager.ApplyConfig(c) - <-discoveryManager.SyncCh() - verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/0"}, "{__address__=\"foo:9090\"}", true) - verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/0"}, "{__address__=\"bar:9090\"}", true) + syncedTargets := <-discoveryManager.SyncCh() + p := pk("static", "prometheus", 0) + verifyPresence(t, discoveryManager.targets, p, "{__address__=\"foo:9090\"}", true) + verifyPresence(t, discoveryManager.targets, p, "{__address__=\"bar:9090\"}", true) + require.Equal(t, 1, len(discoveryManager.targets)) + require.Equal(t, 1, len(syncedTargets)) + verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true) + verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"bar:9090\"}", true) + require.Equal(t, 2, len(syncedTargets["prometheus"])) c["prometheus"] = Configs{ staticConfig("foo:9090"), } discoveryManager.ApplyConfig(c) - - <-discoveryManager.SyncCh() - verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/0"}, "{__address__=\"foo:9090\"}", true) - verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/0"}, "{__address__=\"bar:9090\"}", false) + syncedTargets = <-discoveryManager.SyncCh() + require.Equal(t, 1, len(discoveryManager.targets)) + p = pk("static", "prometheus", 1) + verifyPresence(t, discoveryManager.targets, p, "{__address__=\"foo:9090\"}", true) + verifyPresence(t, discoveryManager.targets, p, "{__address__=\"bar:9090\"}", false) + require.Equal(t, 1, len(discoveryManager.targets)) + require.Equal(t, 1, len(syncedTargets)) + verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true) + require.Equal(t, 1, len(syncedTargets["prometheus"])) } func TestDiscovererConfigs(t *testing.T) { @@ -789,10 +1000,18 @@ func TestDiscovererConfigs(t *testing.T) { } discoveryManager.ApplyConfig(c) - <-discoveryManager.SyncCh() - verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/0"}, "{__address__=\"foo:9090\"}", true) - verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/0"}, "{__address__=\"bar:9090\"}", true) - verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/1"}, "{__address__=\"baz:9090\"}", true) + syncedTargets := <-discoveryManager.SyncCh() + p := pk("static", "prometheus", 0) + verifyPresence(t, discoveryManager.targets, p, "{__address__=\"foo:9090\"}", true) + verifyPresence(t, discoveryManager.targets, p, "{__address__=\"bar:9090\"}", true) + p = pk("static", "prometheus", 1) + verifyPresence(t, discoveryManager.targets, p, 
"{__address__=\"baz:9090\"}", true) + require.Equal(t, 2, len(discoveryManager.targets)) + require.Equal(t, 1, len(syncedTargets)) + verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true) + verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"bar:9090\"}", true) + verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"baz:9090\"}", true) + require.Equal(t, 3, len(syncedTargets["prometheus"])) } // TestTargetSetRecreatesEmptyStaticConfigs ensures that reloading a config file after @@ -812,20 +1031,23 @@ func TestTargetSetRecreatesEmptyStaticConfigs(t *testing.T) { } discoveryManager.ApplyConfig(c) - <-discoveryManager.SyncCh() - verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/0"}, "{__address__=\"foo:9090\"}", true) + syncedTargets := <-discoveryManager.SyncCh() + p := pk("static", "prometheus", 0) + verifyPresence(t, discoveryManager.targets, p, "{__address__=\"foo:9090\"}", true) + require.Equal(t, 1, len(syncedTargets)) + verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true) + require.Equal(t, 1, len(syncedTargets["prometheus"])) c["prometheus"] = Configs{ StaticConfig{{}}, } discoveryManager.ApplyConfig(c) - <-discoveryManager.SyncCh() - - pkey := poolKey{setName: "prometheus", provider: "static/0"} - targetGroups, ok := discoveryManager.targets[pkey] + syncedTargets = <-discoveryManager.SyncCh() + p = pk("static", "prometheus", 1) + targetGroups, ok := discoveryManager.targets[p] if !ok { - t.Fatalf("'%v' should be present in target groups", pkey) + t.Fatalf("'%v' should be present in target groups", p) } group, ok := targetGroups[""] if !ok { @@ -835,6 +1057,12 @@ func TestTargetSetRecreatesEmptyStaticConfigs(t *testing.T) { if len(group.Targets) != 0 { t.Fatalf("Invalid number of targets: expected 0, got %d", len(group.Targets)) } + require.Equal(t, 1, len(syncedTargets)) + require.Equal(t, 1, len(syncedTargets["prometheus"])) + if lbls := syncedTargets["prometheus"][0].Labels; lbls != nil { + t.Fatalf("Unexpected Group: expected nil Labels, got %v", lbls) + } + } func TestIdenticalConfigurationsAreCoalesced(t *testing.T) { @@ -854,12 +1082,17 @@ func TestIdenticalConfigurationsAreCoalesced(t *testing.T) { } discoveryManager.ApplyConfig(c) - <-discoveryManager.SyncCh() - verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/0"}, "{__address__=\"foo:9090\"}", true) - verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus2", provider: "static/0"}, "{__address__=\"foo:9090\"}", true) + syncedTargets := <-discoveryManager.SyncCh() + verifyPresence(t, discoveryManager.targets, pk("static", "prometheus", 0), "{__address__=\"foo:9090\"}", true) + verifyPresence(t, discoveryManager.targets, pk("static", "prometheus2", 0), "{__address__=\"foo:9090\"}", true) if len(discoveryManager.providers) != 1 { t.Fatalf("Invalid number of providers: expected 1, got %d", len(discoveryManager.providers)) } + require.Equal(t, 2, len(syncedTargets)) + verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true) + require.Equal(t, 1, len(syncedTargets["prometheus"])) + verifySyncedPresence(t, syncedTargets, "prometheus2", "{__address__=\"foo:9090\"}", true) + require.Equal(t, 1, len(syncedTargets["prometheus2"])) } func TestApplyConfigDoesNotModifyStaticTargets(t *testing.T) { @@ -891,6 +1124,29 @@ type errorConfig struct{ err error } func (e errorConfig) Name() string { return 
"error" } func (e errorConfig) NewDiscoverer(DiscovererOptions) (Discoverer, error) { return nil, e.err } +type lockStaticConfig struct { + mu *sync.Mutex + config StaticConfig +} + +func (s lockStaticConfig) Name() string { return "lockstatic" } +func (s lockStaticConfig) NewDiscoverer(options DiscovererOptions) (Discoverer, error) { + return (lockStaticDiscoverer)(s), nil +} + +type lockStaticDiscoverer lockStaticConfig + +func (s lockStaticDiscoverer) Run(ctx context.Context, up chan<- []*targetgroup.Group) { + // TODO: existing implementation closes up chan, but documentation explicitly forbids it...? + defer close(up) + s.mu.Lock() + defer s.mu.Unlock() + select { + case <-ctx.Done(): + case up <- s.config: + } +} + func TestGaugeFailedConfigs(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() From a278ea4b58226c3db271fb78703cd110089cba9b Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Wed, 20 Oct 2021 15:03:02 +0100 Subject: [PATCH 007/161] promql: copy data when short-circuiting (#9552) * promql: copy data when short-circuiting Because the range query loop re-uses the output buffer each time round, we must copy results into the buffer rather than using input as output. Signed-off-by: Bryan Boreham --- promql/engine.go | 9 ++++++--- promql/engine_test.go | 26 ++++++++++++++++++++++++++ 2 files changed, 32 insertions(+), 3 deletions(-) diff --git a/promql/engine.go b/promql/engine.go index bd3d836a31..bfa575cac8 100644 --- a/promql/engine.go +++ b/promql/engine.go @@ -1792,9 +1792,11 @@ func (ev *evaluator) VectorOr(lhs, rhs Vector, matching *parser.VectorMatching, panic("set operations must only use many-to-many matching") } if len(lhs) == 0 { // Short-circuit. - return rhs + enh.Out = append(enh.Out, rhs...) + return enh.Out } else if len(rhs) == 0 { - return lhs + enh.Out = append(enh.Out, lhs...) + return enh.Out } leftSigs := map[string]struct{}{} @@ -1819,7 +1821,8 @@ func (ev *evaluator) VectorUnless(lhs, rhs Vector, matching *parser.VectorMatchi // Short-circuit: empty rhs means we will return everything in lhs; // empty lhs means we will return empty - don't need to build a map. if len(lhs) == 0 || len(rhs) == 0 { - return lhs + enh.Out = append(enh.Out, lhs...) + return enh.Out } rightSigs := map[string]struct{}{} diff --git a/promql/engine_test.go b/promql/engine_test.go index c4eb07d523..7ac6ae1d2f 100644 --- a/promql/engine_test.go +++ b/promql/engine_test.go @@ -2410,6 +2410,32 @@ func TestRangeQuery(t *testing.T) { End: time.Unix(120, 0), Interval: 1 * time.Minute, }, + { + Name: "short-circuit", + Load: `load 30s + foo{job="1"} 1+1x4 + bar{job="2"} 1+1x4`, + Query: `foo > 2 or bar`, + Result: Matrix{ + Series{ + Points: []Point{{V: 1, T: 0}, {V: 3, T: 60000}, {V: 5, T: 120000}}, + Metric: labels.Labels{ + labels.Label{Name: "__name__", Value: "bar"}, + labels.Label{Name: "job", Value: "2"}, + }, + }, + Series{ + Points: []Point{{V: 3, T: 60000}, {V: 5, T: 120000}}, + Metric: labels.Labels{ + labels.Label{Name: "__name__", Value: "foo"}, + labels.Label{Name: "job", Value: "1"}, + }, + }, + }, + Start: time.Unix(0, 0), + End: time.Unix(120, 0), + Interval: 1 * time.Minute, + }, } for _, c := range cases { t.Run(c.Name, func(t *testing.T) { From c00c6ed01f0ad210674af221216541b6d1e4148c Mon Sep 17 00:00:00 2001 From: beorn7 Date: Wed, 20 Oct 2021 16:13:36 +0200 Subject: [PATCH 008/161] promql: Add more test cases for `histogram_quantile` This is motivated by the doubts raised in #7970. 
Signed-off-by: beorn7 --- promql/testdata/histograms.test | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/promql/testdata/histograms.test b/promql/testdata/histograms.test index a9bb9b5e45..07e9a9225f 100644 --- a/promql/testdata/histograms.test +++ b/promql/testdata/histograms.test @@ -15,6 +15,15 @@ load 5m testhistogram_bucket{le="0.3", start="negative"} 0+2x10 testhistogram_bucket{le="+Inf", start="negative"} 0+3x10 +# Another test histogram, where q(1/6), q(1/2), and q(5/6) are each in +# the middle of a bucket and should therefore be 1, 3, and 5, +# respectively. +load 5m + testhistogram2_bucket{le="0"} 0+0x10 + testhistogram2_bucket{le="2"} 0+1x10 + testhistogram2_bucket{le="4"} 0+2x10 + testhistogram2_bucket{le="6"} 0+3x10 + testhistogram2_bucket{le="+Inf"} 0+3x10 # Now a more realistic histogram per job and instance to test aggregation. load 5m @@ -91,6 +100,25 @@ eval instant at 50m histogram_quantile(0.8, rate(testhistogram_bucket[5m])) {start="positive"} 0.72 {start="negative"} 0.3 +# Want results exactly in the middle of the bucket. +eval instant at 7m histogram_quantile(1./6., testhistogram2_bucket) + {} 1 + +eval instant at 7m histogram_quantile(0.5, testhistogram2_bucket) + {} 3 + +eval instant at 7m histogram_quantile(5./6., testhistogram2_bucket) + {} 5 + +eval instant at 47m histogram_quantile(1./6., rate(testhistogram2_bucket[15m])) + {} 1 + +eval instant at 47m histogram_quantile(0.5, rate(testhistogram2_bucket[15m])) + {} 3 + +eval instant at 47m histogram_quantile(5./6., rate(testhistogram2_bucket[15m])) + {} 5 + # Aggregated histogram: Everything in one. eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[5m])) by (le)) {} 0.075 From 3458e338c6485eeccb5141ef150ec5433761f629 Mon Sep 17 00:00:00 2001 From: Julien Pivotto Date: Wed, 20 Oct 2021 21:03:17 +0200 Subject: [PATCH 009/161] docs: Improve PuppetDB example (#9547) Signed-off-by: Julien Pivotto --- documentation/examples/prometheus-puppetdb.yml | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/documentation/examples/prometheus-puppetdb.yml b/documentation/examples/prometheus-puppetdb.yml index f7f4313e72..5b10d39fc7 100644 --- a/documentation/examples/prometheus-puppetdb.yml +++ b/documentation/examples/prometheus-puppetdb.yml @@ -14,11 +14,10 @@ scrape_configs: - job_name: 'puppetdb-scrape-jobs' puppetdb_sd_configs: - # This example uses the Prometheus::Scrape_job - # exported resources. - # https://github.com/camptocamp/prometheus-puppetdb-sd - # This examples is compatible with Prometheus-puppetdb-sd, - # if the exported Prometheus::Scrape_job only have at most one target. + # This example uses Prometheus::Scrape_job exported resources. + # It is compatible with the prometheus-puppetdb-sd + # (https://github.com/camptocamp/prometheus-puppetdb-sd) if the + # exported resources have exactly one target. 
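+      # The query below selects exactly those exported resources from PuppetDB.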
    - url: https://puppetdb.example.com
      query: 'resources { type = "Prometheus::Scrape_job" and exported = true }'
      include_parameters: true

From 89a6ebd7993109fc82fdf731925b762fc27d2df9 Mon Sep 17 00:00:00 2001
From: Levi Harrison
Date: Wed, 20 Oct 2021 17:09:58 -0400
Subject: [PATCH 010/161] Add common HTTP client to Azure SD (#9267)

* Add `proxy_url` option to Azure SD

Signed-off-by: Levi Harrison
---
 config/config_test.go               |  1 +
 discovery/azure/azure.go            | 13 +++++++++++
 docs/configuration/configuration.md | 36 +++++++++++++++++++++++++++++
 3 files changed, 50 insertions(+)

diff --git a/config/config_test.go b/config/config_test.go
index 3055de74d8..a6c30b3e5e 100644
--- a/config/config_test.go
+++ b/config/config_test.go
@@ -565,6 +565,7 @@ var expectedConf = &Config{
 					AuthenticationMethod: "OAuth",
 					RefreshInterval:      model.Duration(5 * time.Minute),
 					Port:                 9100,
+					HTTPClientConfig:     config.DefaultHTTPClientConfig,
 				},
 			},
 		},
diff --git a/discovery/azure/azure.go b/discovery/azure/azure.go
index b0de0abd8c..c81eb22053 100644
--- a/discovery/azure/azure.go
+++ b/discovery/azure/azure.go
@@ -64,6 +64,7 @@ var DefaultSDConfig = SDConfig{
 	RefreshInterval:      model.Duration(5 * time.Minute),
 	Environment:          azure.PublicCloud.Name,
 	AuthenticationMethod: authMethodOAuth,
+	HTTPClientConfig:     config_util.DefaultHTTPClientConfig,
 }

 func init() {
@@ -80,6 +81,8 @@ type SDConfig struct {
 	ClientSecret         config_util.Secret `yaml:"client_secret,omitempty"`
 	RefreshInterval      model.Duration     `yaml:"refresh_interval,omitempty"`
 	AuthenticationMethod string             `yaml:"authentication_method,omitempty"`
+
+	HTTPClientConfig config_util.HTTPClientConfig `yaml:",inline"`
 }

 // Name returns the name of the Config.
@@ -200,19 +203,29 @@ func createAzureClient(cfg SDConfig) (azureClient, error) {
 		}
 	}

+	client, err := config_util.NewClientFromConfig(cfg.HTTPClientConfig, "azure_sd")
+	if err != nil {
+		return azureClient{}, err
+	}
+	sender := autorest.DecorateSender(client)
+
 	bearerAuthorizer := autorest.NewBearerAuthorizer(spt)

 	c.vm = compute.NewVirtualMachinesClientWithBaseURI(resourceManagerEndpoint, cfg.SubscriptionID)
 	c.vm.Authorizer = bearerAuthorizer
+	c.vm.Sender = sender

 	c.nic = network.NewInterfacesClientWithBaseURI(resourceManagerEndpoint, cfg.SubscriptionID)
 	c.nic.Authorizer = bearerAuthorizer
+	c.nic.Sender = sender

 	c.vmss = compute.NewVirtualMachineScaleSetsClientWithBaseURI(resourceManagerEndpoint, cfg.SubscriptionID)
 	c.vmss.Authorizer = bearerAuthorizer
+	c.vmss.Sender = sender

 	c.vmssvm = compute.NewVirtualMachineScaleSetVMsClientWithBaseURI(resourceManagerEndpoint, cfg.SubscriptionID)
 	c.vmssvm.Authorizer = bearerAuthorizer
+	c.vmssvm.Sender = sender

 	return c, nil
 }
diff --git a/docs/configuration/configuration.md b/docs/configuration/configuration.md
index 6b451b595c..291b9fe035 100644
--- a/docs/configuration/configuration.md
+++ b/docs/configuration/configuration.md
@@ -429,6 +429,42 @@ subscription_id:
 # The port to scrape metrics from. If using the public IP address, this must
 # instead be specified in the relabeling rule.
 [ port: <int> | default = 80 ]
+
+# Authentication information used to authenticate to the Azure API.
+# Note that `basic_auth`, `authorization` and `oauth2` options are
+# mutually exclusive.
+# `password` and `password_file` are mutually exclusive.
+
+# Optional HTTP basic authentication information, currently not supported by Azure.
+basic_auth:
+  [ username: <string> ]
+  [ password: <secret> ]
+  [ password_file: <string> ]
+
+# Optional `Authorization` header configuration, currently not supported by Azure.
+authorization:
+  # Sets the authentication type.
+  [ type: <string> | default: Bearer ]
+  # Sets the credentials. It is mutually exclusive with
+  # `credentials_file`.
+  [ credentials: <secret> ]
+  # Sets the credentials to the credentials read from the configured file.
+  # It is mutually exclusive with `credentials`.
+  [ credentials_file: <filename> ]
+
+# Optional OAuth 2.0 configuration, currently not supported by Azure.
+oauth2:
+  [ <oauth2> ]
+
+# Optional proxy URL.
+[ proxy_url: <string> ]
+
+# Configure whether HTTP requests follow HTTP 3xx redirects.
+[ follow_redirects: <boolean> | default = true ]
+
+# TLS configuration.
+tls_config:
+  [ <tls_config> ]
 ```

### `<consul_sd_config>`

From 77f411b2ec5acf53c85883f3d63e178dd00743c7 Mon Sep 17 00:00:00 2001
From: Julien Pivotto
Date: Wed, 20 Oct 2021 23:10:18 +0200
Subject: [PATCH 011/161] Enable tls_config in oauth2 (#9550)

* Enable tls_config in oauth2

Signed-off-by: Julien Pivotto
---
 config/config_test.go               | 4 ++++
 config/testdata/conf.good.yml       | 3 +++
 docs/configuration/configuration.md | 4 ++++
 go.mod                              | 2 +-
 go.sum                              | 4 ++--
 5 files changed, 14 insertions(+), 3 deletions(-)

diff --git a/config/config_test.go b/config/config_test.go
index a6c30b3e5e..a2efaa6768 100644
--- a/config/config_test.go
+++ b/config/config_test.go
@@ -103,6 +103,10 @@ var expectedConf = &Config{
 				ClientID:     "123",
 				ClientSecret: "456",
 				TokenURL:     "http://remote1/auth",
+				TLSConfig: config.TLSConfig{
+					CertFile: filepath.FromSlash("testdata/valid_cert_file"),
+					KeyFile:  filepath.FromSlash("testdata/valid_key_file"),
+				},
 			},
 			FollowRedirects: true,
 		},
diff --git a/config/testdata/conf.good.yml b/config/testdata/conf.good.yml
index cdd0c0b306..bfe0228be9 100644
--- a/config/testdata/conf.good.yml
+++ b/config/testdata/conf.good.yml
@@ -23,6 +23,9 @@ remote_write:
       client_id: "123"
       client_secret: "456"
       token_url: "http://remote1/auth"
+      tls_config:
+        cert_file: valid_cert_file
+        key_file: valid_key_file
 - url: http://remote2/push
   name: rw_tls
diff --git a/docs/configuration/configuration.md b/docs/configuration/configuration.md
index 291b9fe035..e3d1f76b81 100644
--- a/docs/configuration/configuration.md
+++ b/docs/configuration/configuration.md
@@ -383,6 +383,10 @@ token_url: <string>
 # Optional parameters to append to the token URL.
 endpoint_params:
   [ <string>: <string> ... ]
+
+# Configures the token request's TLS settings.
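+# For example, a client certificate can be presented to the token endpoint by
+# setting cert_file and key_file, as exercised in conf.good.yml above.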
+tls_config: + [ ] ``` ### `` diff --git a/go.mod b/go.mod index 82de9c1fe1..5daa9bfd91 100644 --- a/go.mod +++ b/go.mod @@ -47,7 +47,7 @@ require ( github.com/prometheus/alertmanager v0.23.0 github.com/prometheus/client_golang v1.11.0 github.com/prometheus/client_model v0.2.0 - github.com/prometheus/common v0.31.1 + github.com/prometheus/common v0.32.1 github.com/prometheus/common/sigv4 v0.1.0 github.com/prometheus/exporter-toolkit v0.7.0 github.com/scaleway/scaleway-sdk-go v1.0.0-beta.7.0.20210223165440-c65ae3540d44 diff --git a/go.sum b/go.sum index 4fda1999c0..3f544a07f6 100644 --- a/go.sum +++ b/go.sum @@ -1143,8 +1143,8 @@ github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB8 github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/common v0.30.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.31.1 h1:d18hG4PkHnNAKNMOmFuXFaiY8Us0nird/2m60uS1AMs= -github.com/prometheus/common v0.31.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= +github.com/prometheus/common v0.32.1 h1:hWIdL3N2HoUx3B8j3YN9mWor0qhY/NlEKZEaXxuIRh4= +github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4= github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI= github.com/prometheus/exporter-toolkit v0.6.1/go.mod h1:ZUBIj498ePooX9t/2xtDjeQYwvRpiPP2lh5u4iblj2g= From b0c98e01c8197b8ba09992100fa297d34c0a9606 Mon Sep 17 00:00:00 2001 From: Julien Pivotto Date: Wed, 20 Oct 2021 23:44:45 +0200 Subject: [PATCH 012/161] Include scrape labels in the hash (#9551) Signed-off-by: Julien Pivotto --- scrape/target.go | 11 +---------- scrape/target_test.go | 26 -------------------------- 2 files changed, 1 insertion(+), 36 deletions(-) diff --git a/scrape/target.go b/scrape/target.go index ada1bcdc51..35d691751d 100644 --- a/scrape/target.go +++ b/scrape/target.go @@ -144,17 +144,8 @@ func (t *Target) SetMetadataStore(s MetricMetadataStore) { func (t *Target) hash() uint64 { h := fnv.New64a() - // We must build a label set without the scrape interval and timeout - // labels because those aren't defining attributes of a target - // and can be changed without qualifying its parent as a new target, - // therefore they should not effect its unique hash. 
- l := t.labels.Map() - delete(l, model.ScrapeIntervalLabel) - delete(l, model.ScrapeTimeoutLabel) - lset := labels.FromMap(l) - //nolint: errcheck - h.Write([]byte(fmt.Sprintf("%016d", lset.Hash()))) + h.Write([]byte(fmt.Sprintf("%016d", t.labels.Hash()))) //nolint: errcheck h.Write([]byte(t.URL().String())) diff --git a/scrape/target_test.go b/scrape/target_test.go index a578d2760b..542326d7c4 100644 --- a/scrape/target_test.go +++ b/scrape/target_test.go @@ -382,29 +382,3 @@ func TestTargetsFromGroup(t *testing.T) { t.Fatalf("Expected error %s, got %s", expectedError, failures[0]) } } - -func TestTargetHash(t *testing.T) { - target1 := &Target{ - labels: labels.Labels{ - {Name: model.AddressLabel, Value: "localhost"}, - {Name: model.SchemeLabel, Value: "http"}, - {Name: model.MetricsPathLabel, Value: "/metrics"}, - {Name: model.ScrapeIntervalLabel, Value: "15s"}, - {Name: model.ScrapeTimeoutLabel, Value: "500ms"}, - }, - } - hash1 := target1.hash() - - target2 := &Target{ - labels: labels.Labels{ - {Name: model.AddressLabel, Value: "localhost"}, - {Name: model.SchemeLabel, Value: "http"}, - {Name: model.MetricsPathLabel, Value: "/metrics"}, - {Name: model.ScrapeIntervalLabel, Value: "14s"}, - {Name: model.ScrapeTimeoutLabel, Value: "600ms"}, - }, - } - hash2 := target2.hash() - - require.Equal(t, hash1, hash2, "Scrape interval and duration labels should not effect hash.") -} From 18886c33c27ef4cba4619fbb38a8093cd1517c47 Mon Sep 17 00:00:00 2001 From: Julian Wiedmann Date: Thu, 21 Oct 2021 15:35:52 +0300 Subject: [PATCH 013/161] docs/operators: fix a typo (#9559) s/are preserved the output/are preserved in the output Signed-off-by: Julian Wiedmann --- docs/querying/operators.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/querying/operators.md b/docs/querying/operators.md index ae6265f6e2..a0491bdbeb 100644 --- a/docs/querying/operators.md +++ b/docs/querying/operators.md @@ -222,7 +222,7 @@ or both `(label1, label2)` and `(label1, label2,)` are valid syntax. `without` removes the listed labels from the result vector, while -all other labels are preserved the output. `by` does the opposite and drops +all other labels are preserved in the output. `by` does the opposite and drops labels that are not listed in the `by` clause, even if their label values are identical between all elements of the vector. From 9d0058a09e47a2fd1dc6f74298566a34bae16f03 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Furkan=20T=C3=BCrkal?= Date: Thu, 21 Oct 2021 15:59:20 +0300 Subject: [PATCH 014/161] Bind port 0 in main_test (#9558) Fixes #9499 Signed-off-by: Furkan --- cmd/prometheus/main_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cmd/prometheus/main_test.go b/cmd/prometheus/main_test.go index 182b6d228c..98b2dd9cf9 100644 --- a/cmd/prometheus/main_test.go +++ b/cmd/prometheus/main_test.go @@ -202,7 +202,7 @@ func TestWALSegmentSizeBounds(t *testing.T) { } for size, expectedExitStatus := range map[string]int{"9MB": 1, "257MB": 1, "10": 2, "1GB": 1, "12MB": 0} { - prom := exec.Command(promPath, "-test.main", "--storage.tsdb.wal-segment-size="+size, "--config.file="+promConfig) + prom := exec.Command(promPath, "-test.main", "--storage.tsdb.wal-segment-size="+size, "--web.listen-address=0.0.0.0:0", "--config.file="+promConfig) // Log stderr in case of failure. 
stderr, err := prom.StderrPipe() @@ -244,7 +244,7 @@ func TestMaxBlockChunkSegmentSizeBounds(t *testing.T) { } for size, expectedExitStatus := range map[string]int{"512KB": 1, "1MB": 0} { - prom := exec.Command(promPath, "-test.main", "--storage.tsdb.max-block-chunk-segment-size="+size, "--config.file="+promConfig) + prom := exec.Command(promPath, "-test.main", "--storage.tsdb.max-block-chunk-segment-size="+size, "--web.listen-address=0.0.0.0:0", "--config.file="+promConfig) // Log stderr in case of failure. stderr, err := prom.StderrPipe() From 4c088ba8ba5287975987a1f6a8fb80350efcc99b Mon Sep 17 00:00:00 2001 From: Julien Pivotto Date: Thu, 21 Oct 2021 21:51:44 +0200 Subject: [PATCH 015/161] Update go dependencies (#9555) Signed-off-by: Julien Pivotto --- go.mod | 52 ++++++++++----------- go.sum | 142 +++++++++++++++++++++++++++++++++------------------------ 2 files changed, 108 insertions(+), 86 deletions(-) diff --git a/go.mod b/go.mod index 5daa9bfd91..32d29cd72a 100644 --- a/go.mod +++ b/go.mod @@ -3,38 +3,38 @@ module github.com/prometheus/prometheus go 1.14 require ( - github.com/Azure/azure-sdk-for-go v57.1.0+incompatible - github.com/Azure/go-autorest/autorest v0.11.20 - github.com/Azure/go-autorest/autorest/adal v0.9.15 + github.com/Azure/azure-sdk-for-go v58.2.0+incompatible + github.com/Azure/go-autorest/autorest v0.11.21 + github.com/Azure/go-autorest/autorest/adal v0.9.16 github.com/Azure/go-autorest/autorest/to v0.4.0 // indirect github.com/Azure/go-autorest/autorest/validation v0.3.1 // indirect - github.com/alecthomas/units v0.0.0-20210912230133-d1bdfacee922 - github.com/aws/aws-sdk-go v1.40.37 + github.com/alecthomas/units v0.0.0-20210927113745-59d0afb8317a + github.com/aws/aws-sdk-go v1.41.7 github.com/cespare/xxhash/v2 v2.1.2 github.com/containerd/containerd v1.5.4 // indirect github.com/dennwc/varint v1.0.0 github.com/dgryski/go-sip13 v0.0.0-20200911182023-62edffca9245 - github.com/digitalocean/godo v1.65.0 - github.com/docker/docker v20.10.8+incompatible + github.com/digitalocean/godo v1.69.1 + github.com/docker/docker v20.10.9+incompatible github.com/docker/go-connections v0.4.0 // indirect github.com/edsrzf/mmap-go v1.0.0 github.com/envoyproxy/go-control-plane v0.9.9 github.com/envoyproxy/protoc-gen-validate v0.6.1 - github.com/go-kit/log v0.1.0 + github.com/go-kit/log v0.2.0 github.com/go-logfmt/logfmt v0.5.1 - github.com/go-openapi/strfmt v0.20.2 + github.com/go-openapi/strfmt v0.20.3 github.com/go-zookeeper/zk v1.0.2 github.com/gogo/protobuf v1.3.2 github.com/golang/snappy v0.0.4 - github.com/google/pprof v0.0.0-20210827144239-02619b876842 - github.com/gophercloud/gophercloud v0.20.0 + github.com/google/pprof v0.0.0-20211008130755-947d60d73cc0 + github.com/gophercloud/gophercloud v0.22.0 github.com/grpc-ecosystem/grpc-gateway v1.16.0 - github.com/hashicorp/consul/api v1.10.1 + github.com/hashicorp/consul/api v1.11.0 github.com/hetznercloud/hcloud-go v1.32.0 - github.com/influxdata/influxdb v1.9.3 - github.com/json-iterator/go v1.1.11 + github.com/influxdata/influxdb v1.9.5 + github.com/json-iterator/go v1.1.12 github.com/kolo/xmlrpc v0.0.0-20201022064351-38db28db192b - github.com/linode/linodego v0.32.0 + github.com/linode/linodego v1.1.0 github.com/miekg/dns v1.1.43 github.com/moby/term v0.0.0-20201216013528-df9cb8a40635 // indirect github.com/morikuni/aec v1.0.0 // indirect @@ -57,25 +57,25 @@ require ( github.com/uber/jaeger-client-go v2.29.1+incompatible github.com/uber/jaeger-lib v2.4.1+incompatible go.uber.org/atomic v1.9.0 - go.uber.org/goleak v1.1.10 - 
golang.org/x/net v0.0.0-20210903162142-ad29c8ab022f - golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f + go.uber.org/goleak v1.1.12 + golang.org/x/net v0.0.0-20211020060615-d418f374d309 + golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1 golang.org/x/sync v0.0.0-20210220032951-036812b2e83c - golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34 + golang.org/x/sys v0.0.0-20211020174200-9d6173849985 golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac - golang.org/x/tools v0.1.5 - google.golang.org/api v0.56.0 - google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83 + golang.org/x/tools v0.1.7 + google.golang.org/api v0.59.0 + google.golang.org/genproto v0.0.0-20211020151524-b7c3a969101a google.golang.org/protobuf v1.27.1 gopkg.in/alecthomas/kingpin.v2 v2.2.6 gopkg.in/fsnotify/fsnotify.v1 v1.4.7 gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b - k8s.io/api v0.22.1 - k8s.io/apimachinery v0.22.1 - k8s.io/client-go v0.22.1 + k8s.io/api v0.22.2 + k8s.io/apimachinery v0.22.2 + k8s.io/client-go v0.22.2 k8s.io/klog v1.0.0 - k8s.io/klog/v2 v2.10.0 + k8s.io/klog/v2 v2.20.0 ) replace ( diff --git a/go.sum b/go.sum index 3f544a07f6..e5e134cbba 100644 --- a/go.sum +++ b/go.sum @@ -24,8 +24,10 @@ cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAV cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= -cloud.google.com/go v0.93.3 h1:wPBktZFzYBcCZVARvwVKqH1uEj+aLXofJEtrb4oOsio= cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= +cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= +cloud.google.com/go v0.97.0 h1:3DXvAyifywvq64LfkKaMOmkWPS1CikIQdMe2lY9vxU8= +cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= @@ -49,33 +51,31 @@ collectd.org v0.3.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-sdk-for-go v41.3.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/azure-sdk-for-go v57.1.0+incompatible h1:TKQ3ieyB0vVKkF6t9dsWbMjq56O1xU3eh3Ec09v6ajM= -github.com/Azure/azure-sdk-for-go v57.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-sdk-for-go v58.2.0+incompatible h1:iCb2tuoEm3N7ZpUDOvu1Yxl1B3iOVDmaD6weaRuIPzs= +github.com/Azure/azure-sdk-for-go v58.2.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= 
github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= -github.com/Azure/go-autorest/autorest v0.9.3/go.mod h1:GsRuLYvwzLjjjRoWEIyMUaYq8GNUx2nRB378IPt/1p0= github.com/Azure/go-autorest/autorest v0.10.0/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630= -github.com/Azure/go-autorest/autorest v0.10.1/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630= github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw= +github.com/Azure/go-autorest/autorest v0.11.9/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw= github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA= -github.com/Azure/go-autorest/autorest v0.11.20 h1:s8H1PbCZSqg/DH7JMlOz6YMig6htWLNPsjDdlLqCx3M= -github.com/Azure/go-autorest/autorest v0.11.20/go.mod h1:o3tqFY+QR40VOlk+pV4d77mORO64jOXSgEnPQgLK6JY= +github.com/Azure/go-autorest/autorest v0.11.21 h1:w77zY/9RnUAWcIQyDC0Fc89mCvwftR8F+zsR/OH6enk= +github.com/Azure/go-autorest/autorest v0.11.21/go.mod h1:Do/yuMSW/13ayUkcVREpsMHGG+MvV81uzSCFgYPj4tM= github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= -github.com/Azure/go-autorest/autorest/adal v0.8.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= -github.com/Azure/go-autorest/autorest/adal v0.8.1/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= github.com/Azure/go-autorest/autorest/adal v0.8.3/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg= github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= -github.com/Azure/go-autorest/autorest/adal v0.9.15 h1:X+p2GF0GWyOiSmqohIaEeuNFNDY4I4EOlVuUQvFdWMk= -github.com/Azure/go-autorest/autorest/adal v0.9.15/go.mod h1:tGMin8I49Yij6AQ+rvV+Xa/zwxYQB5hmsd6DkfAx2+A= -github.com/Azure/go-autorest/autorest/azure/auth v0.4.2/go.mod h1:90gmfKdlmKgfjUpnCEpOJzsUEjrWDSLwHIG73tSXddM= -github.com/Azure/go-autorest/autorest/azure/cli v0.3.1/go.mod h1:ZG5p860J94/0kI9mNJVoIoLgXcirM2gF5i2kWloofxw= +github.com/Azure/go-autorest/autorest/adal v0.9.14/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= +github.com/Azure/go-autorest/autorest/adal v0.9.16 h1:P8An8Z9rH1ldbOLdFpxYorgOt2sywL9V24dAwWHPuGc= +github.com/Azure/go-autorest/autorest/adal v0.9.16/go.mod h1:tGMin8I49Yij6AQ+rvV+Xa/zwxYQB5hmsd6DkfAx2+A= +github.com/Azure/go-autorest/autorest/azure/auth v0.5.3/go.mod h1:4bJZhUhcq8LB20TruwHbAQsmUs2Xh+QR7utuJpLXX3A= +github.com/Azure/go-autorest/autorest/azure/cli v0.4.2/go.mod h1:7qkJkT+j6b+hIpzMOwPChJhTqS8VbsqqgULzMNRugoM= github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= @@ -152,14 +152,15 @@ github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRF github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= 
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/alecthomas/units v0.0.0-20210208195552-ff826a37aa15/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= -github.com/alecthomas/units v0.0.0-20210912230133-d1bdfacee922 h1:8ypNbf5sd3Sm3cKJ9waOGoQv6dKAFiFty9L6NP1AqJ4= -github.com/alecthomas/units v0.0.0-20210912230133-d1bdfacee922/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= +github.com/alecthomas/units v0.0.0-20210927113745-59d0afb8317a h1:E/8AP5dFtMhl5KPJz66Kt9G0n+7Sn41Fy1wv9/jHOrc= +github.com/alecthomas/units v0.0.0-20210927113745-59d0afb8317a/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0= github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/aokoli/goutils v1.0.1/go.mod h1:SijmP0QR8LtwsmDs8Yii5Z/S4trXFGFC2oO5g9DP+DQ= github.com/apache/arrow/go/arrow v0.0.0-20191024131854-af6fa24be0db/go.mod h1:VTxUBvSJ3s3eHAg65PNgrsn5BtqCRPdmyXh6rAfdxN0= +github.com/apache/arrow/go/arrow v0.0.0-20200601151325-b2287a20f230/go.mod h1:QNYViu/X0HXDHw7m3KXzWSVXIbfUvJqBFe6Gj8/pYA0= github.com/apache/arrow/go/arrow v0.0.0-20200923215132-ac86123a3f01/go.mod h1:QNYViu/X0HXDHw7m3KXzWSVXIbfUvJqBFe6Gj8/pYA0= github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= @@ -185,8 +186,8 @@ github.com/aws/aws-sdk-go v1.30.12/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZve github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48= github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/aws/aws-sdk-go v1.40.11/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q= -github.com/aws/aws-sdk-go v1.40.37 h1:I+Q6cLctkFyMMrKukcDnj+i2kjrQ37LGiOM6xmsxC48= -github.com/aws/aws-sdk-go v1.40.37/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q= +github.com/aws/aws-sdk-go v1.41.7 h1:vlpR8Cky3ZxUVNINgeRZS6N0p6zmFvu/ZqRRwrTI25U= +github.com/aws/aws-sdk-go v1.41.7/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q= github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= github.com/benbjohnson/immutable v0.2.1/go.mod h1:uc6OHo6PN2++n98KHLxW8ef4W42ylHiQSENghE1ezxI= github.com/benbjohnson/tmpl v1.0.0/go.mod h1:igT620JFIi44B6awvU9IsDhR77IXWtFigTLil/RPdps= @@ -360,14 +361,13 @@ github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgz github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0= github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/dgrijalva/jwt-go/v4 v4.0.0-preview1/go.mod h1:+hnT3ywWDTAFrW5aE+u2Sa/wT555ZqwoCS+pk3p6ry4= github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8/go.mod h1:VMaSuZ+SZcx/wljOQKvp5srsbCiKDEb6K2wC4+PiBmQ= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod 
h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/dgryski/go-sip13 v0.0.0-20190329191031-25c5027a8c7b/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/dgryski/go-sip13 v0.0.0-20200911182023-62edffca9245 h1:9cOfvEwjQxdwKuNDTQSaMKNRvwKwgZG+U4HrjeRKHso= github.com/dgryski/go-sip13 v0.0.0-20200911182023-62edffca9245/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= -github.com/digitalocean/godo v1.65.0 h1:3SywGJBC18HaYtPQF+T36jYzXBi+a6eIMonSjDll7TA= -github.com/digitalocean/godo v1.65.0/go.mod h1:p7dOjjtSBqCTUksqtA5Fd3uaKs9kyTq2xcz76ulEJRU= +github.com/digitalocean/godo v1.69.1 h1:aCyfwth8R3DeOaWB9J9E8v7cjlDIlF19eXTt8R3XhTE= +github.com/digitalocean/godo v1.69.1/go.mod h1:epPuOzTOOJujNo0nduDj2D5O1zu8cSpp9R+DdN0W9I0= github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8= github.com/dnaeon/go-vcr v1.0.1 h1:r8L/HqC0Hje5AXMu1ooW8oyQyOFv4GxqpL0nRP7SLLY= github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= @@ -375,8 +375,8 @@ github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TT github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug= github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v20.10.8+incompatible h1:RVqD337BgQicVCzYrrlhLDWhq6OAD2PJDUg2LsEUvKM= -github.com/docker/docker v20.10.8+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v20.10.9+incompatible h1:JlsVnETOjM2RLQa0Cc1XCIspUdXW3Zenq9P54uXBm6k= +github.com/docker/docker v20.10.9+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-events v0.0.0-20170721190031-9461782956ad/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= @@ -453,8 +453,9 @@ github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2 github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.10.0 h1:dXFJfIHVvUcpSgDOV+Ne6t7jXri8Tfv2uOLHUZ2XNuo= github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= -github.com/go-kit/log v0.1.0 h1:DGJh0Sm43HbOeYDNnVZFl8BvcYVvjD5bqYJvp0REbwQ= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-kit/log v0.2.0 h1:7i2K3eKTos3Vc0enKCfnVcgHh2olr/MyfboYq7cAcFw= +github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= @@ -539,8 +540,8 @@ github.com/go-openapi/strfmt v0.19.5/go.mod h1:eftuHTlB/dI8Uq8JJOyRlieZf+WkkxUuk github.com/go-openapi/strfmt v0.19.11/go.mod h1:UukAYgTaQfqJuAFlNxxMWNvMYiwiXtLsF2VwmoFtbtc= github.com/go-openapi/strfmt v0.20.0/go.mod h1:UukAYgTaQfqJuAFlNxxMWNvMYiwiXtLsF2VwmoFtbtc= github.com/go-openapi/strfmt v0.20.1/go.mod h1:43urheQI9dNtE5lTZQfuFJvjYJKPrxicATpEfZwHUNk= -github.com/go-openapi/strfmt v0.20.2 
h1:6XZL+fF4VZYFxKQGLAUB358hOrRh/wS51uWEtlONADE= -github.com/go-openapi/strfmt v0.20.2/go.mod h1:43urheQI9dNtE5lTZQfuFJvjYJKPrxicATpEfZwHUNk= +github.com/go-openapi/strfmt v0.20.3 h1:YVG4ZgPZ00km/lRHrIf7c6cKL5/4FAUtG2T9RxWAgDY= +github.com/go-openapi/strfmt v0.20.3/go.mod h1:43urheQI9dNtE5lTZQfuFJvjYJKPrxicATpEfZwHUNk= github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= @@ -616,6 +617,8 @@ github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXP github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang-jwt/jwt v3.2.1+incompatible h1:73Z+4BJcrTC+KczS6WvTPvRGOp1WmfEP4Q1lOd9Z/+c= +github.com/golang-jwt/jwt v3.2.1+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= github.com/golang-jwt/jwt/v4 v4.0.0 h1:RAqyYixv1p7uEnocuy8P1nru5wprCh/MH2BIlW5z5/o= github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= @@ -707,8 +710,8 @@ github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLe github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210827144239-02619b876842 h1:JCrt5MIE1fHQtdy1825HwJ45oVQaqHE6lgssRhjcg/o= -github.com/google/pprof v0.0.0-20210827144239-02619b876842/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20211008130755-947d60d73cc0 h1:zHs+jv3LO743/zFGcByu2KmpbliCU2AhjcGgrdTwSG4= +github.com/google/pprof v0.0.0-20211008130755-947d60d73cc0/go.mod h1:KgnwoLYCZ8IQu3XUZ8Nc/bM9CCZFOyjUNOSygVozoDg= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -717,8 +720,9 @@ github.com/google/uuid v1.2.0 h1:qJYtXnJRWmpe7m/3XlyhrsLrEURqHRM2kxzoxXqyUDs= github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/gax-go/v2 v2.1.0 h1:6DWmvNpomjL1+3liNSZbVns3zsYzzCjm6pRBO1tLeso= github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= +github.com/googleapis/gax-go/v2 v2.1.1 h1:dp3bWCh+PPO1zjRRiCSczJav13sBvG4UhNyVTa1KqdU= +github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/googleapis/gnostic v0.4.0/go.mod 
h1:on+2t9HRStVgn95RSsFWFz+6Q0Snyqv1awfrALZdbtU= github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= @@ -727,8 +731,8 @@ github.com/googleapis/gnostic v0.5.5 h1:9fHAtK0uDfpveeqqo1hkEZJcFvYXAiCN3UutL8F9 github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA= github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= github.com/gophercloud/gophercloud v0.10.0/go.mod h1:gmC5oQqMDOMO1t1gq5DquX/yAU808e/4mzjjDA76+Ss= -github.com/gophercloud/gophercloud v0.20.0 h1:1+4jrsjVhdX5omlAo4jkmFc6ftLbuXLzgFo4i6lH+Gk= -github.com/gophercloud/gophercloud v0.20.0/go.mod h1:wRtmUelyIIv3CSSDI47aUwbs075O6i+LY+pXsKCBsb4= +github.com/gophercloud/gophercloud v0.22.0 h1:9lFISNLafZcecT0xUveIMt3IafexC6DIV9ek1SZdSMw= +github.com/gophercloud/gophercloud v0.22.0/go.mod h1:wRtmUelyIIv3CSSDI47aUwbs075O6i+LY+pXsKCBsb4= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= @@ -751,8 +755,8 @@ github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4 github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= github.com/hashicorp/consul/api v1.4.0/go.mod h1:xc8u05kyMa3Wjr9eEAsIAo3dg8+LywT5E/Cl7cNS5nU= -github.com/hashicorp/consul/api v1.10.1 h1:MwZJp86nlnL+6+W1Zly4JUuVn9YHhMggBirMpHGD7kw= -github.com/hashicorp/consul/api v1.10.1/go.mod h1:XjsvQN+RJGWI2TWy1/kqaE16HrR2J/FWgkYjdZQsX9M= +github.com/hashicorp/consul/api v1.11.0 h1:Hw/G8TtRvOElqxVIhBzXciiSTbapq8hZ2XKZsXk5ZCE= +github.com/hashicorp/consul/api v1.11.0/go.mod h1:XjsvQN+RJGWI2TWy1/kqaE16HrR2J/FWgkYjdZQsX9M= github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/consul/sdk v0.4.0/go.mod h1:fY08Y9z5SvJqevyZNy6WWPXiG3KwBPAvlcdx16zZ0fM= github.com/hashicorp/consul/sdk v0.8.0 h1:OJtKBtEjboEZvG6AOUdh4Z1Zbyu0WcxQ0qatRrZHTVU= @@ -814,6 +818,7 @@ github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmK github.com/iancoleman/strcase v0.0.0-20180726023541-3605ed457bf7/go.mod h1:SK73tn/9oHe+/Y0h39VT4UCxmurVJkR5NA7kMEAOgSE= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20210905161508-09a460cdf81d/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w= github.com/imdario/mergo v0.3.4/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= @@ -822,18 +827,18 @@ github.com/imdario/mergo v0.3.11 h1:3tnifQM4i+fbajXKBHXWEH+KvNHqojZ778UH75j3bGA= github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/influxdata/flux v0.65.0/go.mod h1:BwN2XG2lMszOoquQaFdPET8FRQfrXiZsWmcMO9rkaVY= -github.com/influxdata/flux v0.120.1/go.mod 
h1:pGSAvyAA5d3et7SSzajaYShWYXmnRnJJq2qWi+WWZ2I= +github.com/influxdata/flux v0.131.0/go.mod h1:CKvnYe6FHpTj/E0YGI7TcOZdGiYHoToOPSnoa12RtKI= github.com/influxdata/httprouter v1.3.1-0.20191122104820-ee83e2772f69/go.mod h1:pwymjR6SrP3gD3pRj9RJwdl1j5s3doEEV8gS4X9qSzA= github.com/influxdata/influxdb v1.8.0/go.mod h1:SIzcnsjaHRFpmlxpJ4S3NT64qtEKYweNTUMb/vh0OMQ= -github.com/influxdata/influxdb v1.9.3 h1:60F7eqotCxogyuZAjNglNRG9D6WY65KR9mcmugBx6cs= -github.com/influxdata/influxdb v1.9.3/go.mod h1:xD4ZjAgEJQO9/bX3NhFrssKtdNPi+ki1kjrttJRDhGc= +github.com/influxdata/influxdb v1.9.5 h1:4O7AC5jOA9RoqtDuD2rysXbumcEwaqWlWXmwuyK+a2s= +github.com/influxdata/influxdb v1.9.5/go.mod h1:4uPVvcry9KWQVWLxyT9641qpkRXUBN+xa0MJFFNNLKo= github.com/influxdata/influxdb-client-go/v2 v2.3.1-0.20210518120617-5d1fff431040/go.mod h1:vLNHdxTJkIf2mSLvGrpj8TCcISApPoXkaxP8g9uRlW8= github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= github.com/influxdata/influxql v1.1.0/go.mod h1:KpVI7okXjK6PRi3Z5B+mtKZli+R1DnZgb3N+tzevNgo= github.com/influxdata/influxql v1.1.1-0.20210223160523-b6ab99450c93/go.mod h1:gHp9y86a/pxhjJ+zMjNXiQAA197Xk9wLxaz+fGG+kWk= github.com/influxdata/line-protocol v0.0.0-20180522152040-32c6aa80de5e/go.mod h1:4kt73NQhadE3daL3WhR5EJ/J2ocX0PZzwxQ0gXJ7oFE= github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839/go.mod h1:xaLFMmpvUxqXtVkUJfg9QmT88cDaCJ3ZKgdZ78oO8Qo= -github.com/influxdata/pkg-config v0.2.7/go.mod h1:EMS7Ll0S4qkzDk53XS3Z72/egBsPInt+BeRxb0WeSwk= +github.com/influxdata/pkg-config v0.2.8/go.mod h1:EMS7Ll0S4qkzDk53XS3Z72/egBsPInt+BeRxb0WeSwk= github.com/influxdata/promql/v2 v2.12.0/go.mod h1:fxOPu+DY0bqCTCECchSRtWfc+0X19ybifQhZoQNF5D8= github.com/influxdata/roaring v0.4.13-0.20180809181101-fc520f41fab6/go.mod h1:bSgUQ7q5ZLSO+bKBGqJiCBGAl+9DxyW63zLTujjUlOE= github.com/influxdata/tdigest v0.0.0-20181121200506-bf2b5ad3c0a9/go.mod h1:Js0mqiSBE6Ffsg94weZZ2c+v/ciT8QRHFOap7EKDrR0= @@ -861,8 +866,9 @@ github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/u github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.11 h1:uVUAXhF2To8cbw/3xN3pxj6kk7TYKs98NIrTqPlMWAQ= github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/jsternberg/zap-logfmt v1.0.0/go.mod h1:uvPs/4X51zdkcm5jXl5SYoN+4RK21K8mysFmDaM/h+o= @@ -909,8 +915,8 @@ github.com/leodido/go-urn v1.1.0/go.mod h1:+cyI34gQWZcE1eQU7NVgKkkzdXDQHr1dBMtdA github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= -github.com/linode/linodego v0.32.0 h1:IK04cx2b/IwAAd6XLruf1Dl/n3dRXj87Uw/5qo6afVU= 
-github.com/linode/linodego v0.32.0/go.mod h1:BR0gVkCJffEdIGJSl6bHR80Ty+Uvg/2jkjmrWaFectM= +github.com/linode/linodego v1.1.0 h1:ZiFVUptlzuExtUbHZtXiN7I0dAOFQAyirBKb/6/n9n4= +github.com/linode/linodego v1.1.0/go.mod h1:x/7+BoaKd4unViBmS2umdjYyVAmpFtBtEXZ0wou7FYQ= github.com/lyft/protoc-gen-star v0.5.1/go.mod h1:9toiA3cC7z5uVbODF7kEQ91Xn7XNFkVUl+SrEe+ZORU= github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= @@ -990,8 +996,9 @@ github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= @@ -1213,7 +1220,7 @@ github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= -github.com/snowflakedb/gosnowflake v1.3.4/go.mod h1:NsRq2QeiMUuoNUJhp5Q6xGC4uBrsS9g6LwZVEkTWgsE= +github.com/snowflakedb/gosnowflake v1.3.13/go.mod h1:6nfka9aTXkUNha1p1cjeeyjDvcyh7jfjp0l8kGpDBok= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= @@ -1311,6 +1318,7 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs= github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA= github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg= @@ -1349,8 +1357,8 @@ go.uber.org/atomic v1.5.1/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic 
v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/goleak v1.1.10 h1:z+mqJhf6ss6BSfSM671tgKyZBFPTTJM+HLxnhPC3wu0= -go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= +go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA= +go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.4.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= @@ -1385,6 +1393,7 @@ golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200422194213-44a606286825/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -1500,8 +1509,9 @@ golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210903162142-ad29c8ab022f h1:w6wWR0H+nyVpbSAQbzVEIACVyr/h8l/BEkY6Sokc7Eg= -golang.org/x/net v0.0.0-20210903162142-ad29c8ab022f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211020060615-d418f374d309 h1:A0lJIi+hcTR6aajJH4YqKWwohY4aW9RO7oRMcdv+HKI= +golang.org/x/net v0.0.0-20211020060615-d418f374d309/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1516,8 +1526,9 @@ golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f h1:Qmd2pbz05z7z6lm0DrgQVVPuBm92jqujBKMHMOlOQEw= golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod 
h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1 h1:B333XXssMuKQeBwiNODx4TupZy7bf4sxFZnN2ZOcvUE= +golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1637,9 +1648,12 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34 h1:GkvMjFtXUmahfDtashnc1mnrCtuBVcwse5QV2lUk/tI= -golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211020174200-9d6173849985 h1:LOlKVhfDyahgmqa97awczplwkjzNaELFg3zRIJ13RYo= +golang.org/x/sys v0.0.0-20211020174200-9d6173849985/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d h1:SZxvLBoTP5yHO3Frd4z4vrF+DBX9vMVanchswa69toE= @@ -1698,7 +1712,6 @@ golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -1742,8 +1755,9 @@ golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.5 h1:ouewzE6p+/VEB31YYnTbEJdi8pFqKp4P4n85vwo3DHA= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= 
+golang.org/x/tools v0.1.7 h1:6j8CgantCy3yc8JGBqkDLMKWqZ0RDU2g1HVgacojGWQ= +golang.org/x/tools v0.1.7/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1784,8 +1798,11 @@ google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtuk google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= -google.golang.org/api v0.56.0 h1:08F9XVYTLOGeSQb3xI9C0gXMuQanhdGed0cWFhDozbI= +google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= +google.golang.org/api v0.59.0 h1:fPfFO7gttlXYo2ALuD3HxJzh8vaF++4youI0BkFL6GE= +google.golang.org/api v0.59.0/go.mod h1:sT2boj7M9YJxZzgeZqXogmhfmRWDtPzT31xkieUbuZU= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -1855,8 +1872,13 @@ google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKr google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83 h1:3V2dxSZpz4zozWWUq36vUxXEKnSYitEH2LdsAx+RUmg= +google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211008145708-270636b82663/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211020151524-b7c3a969101a h1:8maMHMQp9NroHXhc3HelFX9Ay2lWlXLcdH5mw5Biz0s= +google.golang.org/genproto v0.0.0-20211020151524-b7c3a969101a/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= @@ -1967,14 +1989,14 @@ k8s.io/api v0.17.5/go.mod h1:0zV5/ungglgy2Rlm3QK8fbxkXVs+BSJWpJP/+8gUVLY= k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo= k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ= k8s.io/api 
v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8= -k8s.io/api v0.22.1 h1:ISu3tD/jRhYfSW8jI/Q1e+lRxkR7w9UwQEZ7FgslrwY= -k8s.io/api v0.22.1/go.mod h1:bh13rkTp3F1XEaLGykbyRD2QaTTzPm0e/BMd8ptFONY= +k8s.io/api v0.22.2 h1:M8ZzAD0V6725Fjg53fKeTJxGsJvRbk4TEm/fexHMtfw= +k8s.io/api v0.22.2/go.mod h1:y3ydYpLJAaDI+BbSe2xmGcqxiWHmWjkEeIbiwHvnPR8= k8s.io/apimachinery v0.17.5/go.mod h1:ioIo1G/a+uONV7Tv+ZmCbMG1/a3kVw5YcDdncd8ugQ0= k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= k8s.io/apimachinery v0.20.6/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc= -k8s.io/apimachinery v0.22.1 h1:DTARnyzmdHMz7bFWFDDm22AM4pLWTQECMpRTFu2d2OM= -k8s.io/apimachinery v0.22.1/go.mod h1:O3oNtNadZdeOMxHFVxOreoznohCpy0z6mocxbZr7oJ0= +k8s.io/apimachinery v0.22.2 h1:ejz6y/zNma8clPVfNDLnPbleBo6MpoFy/HBiBqCouVk= +k8s.io/apimachinery v0.22.2/go.mod h1:O3oNtNadZdeOMxHFVxOreoznohCpy0z6mocxbZr7oJ0= k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU= k8s.io/apiserver v0.20.4/go.mod h1:Mc80thBKOyy7tbvFtB4kJv1kbdD0eIH8k8vianJcbFM= k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q= @@ -1982,8 +2004,8 @@ k8s.io/client-go v0.17.5/go.mod h1:S8uZpBpjJJdEH/fEyxcqg7Rn0P5jH+ilkgBHjriSmNo= k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y= k8s.io/client-go v0.20.4/go.mod h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k= k8s.io/client-go v0.20.6/go.mod h1:nNQMnOvEUEsOzRRFIIkdmYOjAZrC8bgq0ExboWSU1I0= -k8s.io/client-go v0.22.1 h1:jW0ZSHi8wW260FvcXHkIa0NLxFBQszTlhiAVsU5mopw= -k8s.io/client-go v0.22.1/go.mod h1:BquC5A4UOo4qVDUtoc04/+Nxp1MeHcVc1HJm1KmG8kk= +k8s.io/client-go v0.22.2 h1:DaSQgs02aCC1QcwUdkKZWOeaVsQjYvWv8ZazcZ6JcHc= +k8s.io/client-go v0.22.2/go.mod h1:sAlhrkVDf50ZHx6z4K0S40wISNTarf1r800F+RlCF6U= k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk= k8s.io/component-base v0.20.4/go.mod h1:t4p9EdiagbVCJKrQ1RsA5/V4rFQNDfRlevJajlGwgjI= k8s.io/component-base v0.20.6/go.mod h1:6f1MPBAeI+mvuts3sIdtpjljHWBQ2cIy38oBIWMYnrM= @@ -2001,8 +2023,8 @@ k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= k8s.io/utils v0.0.0-20191114184206-e782cd3c129f/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= k8s.io/utils v0.0.0-20200414100711-2df71ebbae66/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20210707171843-4b05e18ac7d9 h1:imL9YgXQ9p7xmPzHFm/vVd/cF78jad+n4wK1ABwYtMM= -k8s.io/utils v0.0.0-20210707171843-4b05e18ac7d9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a h1:8dYfu/Fc9Gz2rNJKB9IQRGgQOh2clmRzNIPPY1xLY5g= +k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= From 8c3eca84db3b2feae44fac43fc918cc69fad07ec Mon Sep 17 00:00:00 2001 From: Serge Catudal Date: Thu, 21 Oct 2021 16:58:40 -0400 Subject: [PATCH 016/161] Fix remote write receiver endpoint for exemplars (#9414) Signed-off-by: Serge Catudal --- tsdb/head_append.go | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git 
a/tsdb/head_append.go b/tsdb/head_append.go
index 10c7d0795a..bd2888930c 100644
--- a/tsdb/head_append.go
+++ b/tsdb/head_append.go
@@ -305,12 +305,20 @@ func (s *memSeries) appendable(t int64, v float64) error {
 
 // AppendExemplar for headAppender assumes the series ref already exists, and so it doesn't
 // use getOrCreate or make any of the lset sanity checks that Append does.
-func (a *headAppender) AppendExemplar(ref uint64, _ labels.Labels, e exemplar.Exemplar) (uint64, error) {
+func (a *headAppender) AppendExemplar(ref uint64, lset labels.Labels, e exemplar.Exemplar) (uint64, error) {
 	// Check if exemplar storage is enabled.
 	if !a.head.opts.EnableExemplarStorage || a.head.opts.MaxExemplars.Load() <= 0 {
 		return 0, nil
 	}
+
+	// Get the series by ID; if it is unknown, fall back to a lookup by labels hash.
 	s := a.head.series.getByID(ref)
+	if s == nil {
+		s = a.head.series.getByHash(lset.Hash(), lset)
+		if s != nil {
+			ref = s.ref
+		}
+	}
 	if s == nil {
 		return 0, fmt.Errorf("unknown series ref. when trying to add exemplar: %d", ref)
 	}

From d81bbe154d8768df66004eeef8546e5b733edde8 Mon Sep 17 00:00:00 2001
From: Levi Harrison
Date: Thu, 21 Oct 2021 17:14:17 -0400
Subject: [PATCH 017/161] Rule alerts/series limit updates (#9541)

* Add docs and do not limit inactive alerts.

Signed-off-by: Levi Harrison
---
 docs/configuration/recording_rules.md | 12 +++-
 rules/alerting.go                     |  7 ++-
 rules/alerting_test.go                | 70 ++++++++++++------------
 rules/recording.go                    |  6 +-
 rules/recording_test.go               | 79 +++++++++++++++++----------
 5 files changed, 102 insertions(+), 72 deletions(-)

diff --git a/docs/configuration/recording_rules.md b/docs/configuration/recording_rules.md
index 8b12632dc5..a14aab09d8 100644
--- a/docs/configuration/recording_rules.md
+++ b/docs/configuration/recording_rules.md
@@ -78,8 +78,8 @@ name: <string>
 # How often rules in the group are evaluated.
 [ interval: <duration> | default = global.evaluation_interval ]
 
-# Limit the number of alerts and series individual rules can produce.
-# 0 is no limit.
+# Limit the number of alerts an alerting rule and series a recording
+# rule can produce. 0 is no limit.
 [ limit: <int> | default = 0 ]
 
 rules:
@@ -128,3 +128,11 @@ annotations:
   [ <labelname>: <tmpl_string> ]
 ```
 
+# Limiting alerts and series
+
+A limit for alerts produced by alerting rules and series produced by recording
+rules can be configured per-group. When the limit is exceeded, _all_ series
+produced by the rule are discarded, and if it's an alerting rule, _all_ alerts
+for the rule, active, pending, or inactive, are cleared as well. The event will
+be recorded as an error in the evaluation, and as such no stale markers are
+written.
diff --git a/rules/alerting.go b/rules/alerting.go
index 5e7b8975c8..28abcf2a5b 100644
--- a/rules/alerting.go
+++ b/rules/alerting.go
@@ -389,6 +389,7 @@ func (r *AlertingRule) Eval(ctx context.Context, ts time.Time, query QueryFunc,
 		r.active[h] = a
 	}
 
+	var numActivePending int
 	// Check if any pending alerts should be removed or fire now. Write out alert timeseries.
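+	// Every alert that is kept as active or pending by the loop below is
+	// counted in numActivePending; the total is compared against the group's
+	// limit once the loop has finished.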
 	for fp, a := range r.active {
 		if _, ok := resultFPs[fp]; !ok {
@@ -403,6 +404,7 @@ func (r *AlertingRule) Eval(ctx context.Context, ts time.Time, query QueryFunc,
 			}
 			continue
 		}
+		numActivePending++
 
 		if a.State == StatePending && ts.Sub(a.ActiveAt) >= r.holdDuration {
 			a.State = StateFiring
@@ -415,10 +417,9 @@ func (r *AlertingRule) Eval(ctx context.Context, ts time.Time, query QueryFunc,
 		}
 	}
 
-	numActive := len(r.active)
-	if limit != 0 && numActive > limit {
+	if limit > 0 && numActivePending > limit {
 		r.active = map[uint64]*Alert{}
-		return nil, errors.Errorf("exceeded limit of %d with %d alerts", limit, numActive)
+		return nil, errors.Errorf("exceeded limit of %d with %d alerts", limit, numActivePending)
 	}
 
 	return vec, nil
diff --git a/rules/alerting_test.go b/rules/alerting_test.go
index 94d5054454..42882f4f65 100644
--- a/rules/alerting_test.go
+++ b/rules/alerting_test.go
@@ -466,23 +466,17 @@ func TestAlertingRuleDuplicate(t *testing.T) {
 }
 
 func TestAlertingRuleLimit(t *testing.T) {
-	storage := teststorage.New(t)
-	defer storage.Close()
+	suite, err := promql.NewTest(t, `
+		load 1m
+			metric{label="1"} 1
+			metric{label="2"} 1
+	`)
+	require.NoError(t, err)
+	defer suite.Close()
 
-	opts := promql.EngineOpts{
-		Logger:     nil,
-		Reg:        nil,
-		MaxSamples: 10,
-		Timeout:    10 * time.Second,
-	}
+	require.NoError(t, suite.Run())
 
-	engine := promql.NewEngine(opts)
-	ctx, cancelCtx := context.WithCancel(context.Background())
-	defer cancelCtx()
-
-	now := time.Now()
-
-	suite := []struct {
+	tests := []struct {
 		limit int
 		err   string
 	}{
@@ -490,31 +484,37 @@ func TestAlertingRuleLimit(t *testing.T) {
 			limit: 0,
 		},
 		{
-			limit: 1,
+			limit: -1,
 		},
 		{
-			limit: -1,
-			err:   "exceeded limit of -1 with 1 alerts",
+			limit: 2,
+		},
+		{
+			limit: 1,
+			err:   "exceeded limit of 1 with 2 alerts",
 		},
 	}
 
-	for _, test := range suite {
-		expr, _ := parser.ParseExpr(`1`)
-		rule := NewAlertingRule(
-			"foo",
-			expr,
-			time.Minute,
-			labels.FromStrings("test", "test"),
-			nil,
-			nil,
-			"",
-			true, log.NewNopLogger(),
-		)
-		_, err := rule.Eval(ctx, now, EngineQueryFunc(engine, storage), nil, test.limit)
-		if test.err == "" {
-			require.NoError(t, err)
-		} else {
-			require.Equal(t, test.err, err.Error())
+	expr, _ := parser.ParseExpr(`metric > 0`)
+	rule := NewAlertingRule(
+		"foo",
+		expr,
+		time.Minute,
+		labels.FromStrings("test", "test"),
+		nil,
+		nil,
+		"",
+		true, log.NewNopLogger(),
+	)
+
+	evalTime := time.Unix(0, 0)
+
+	for _, test := range tests {
+		_, err := rule.Eval(suite.Context(), evalTime, EngineQueryFunc(suite.QueryEngine(), suite.Storage()), nil, test.limit)
+		if err != nil {
+			require.EqualError(t, err, test.err)
+		} else if test.err != "" {
+			t.Errorf("Expected error %s, got none", test.err)
 		}
 	}
 }
diff --git a/rules/recording.go b/rules/recording.go
index 08a7e37ca3..0081690b81 100644
--- a/rules/recording.go
+++ b/rules/recording.go
@@ -99,9 +99,9 @@ func (rule *RecordingRule) Eval(ctx context.Context, ts time.Time, query QueryFu
 		return nil, fmt.Errorf("vector contains metrics with the same labelset after applying rule labels")
 	}
 
-	numSamples := len(vector)
-	if limit != 0 && numSamples > limit {
-		return nil, fmt.Errorf("exceeded limit %d with %d samples", limit, numSamples)
+	numSeries := len(vector)
+	if limit > 0 && numSeries > limit {
+		return nil, fmt.Errorf("exceeded limit of %d with %d series", limit, numSeries)
 	}
 
 	rule.SetHealth(HealthGood)
diff --git a/rules/recording_test.go b/rules/recording_test.go
index 211b6cda67..d6b0b6c2ae 100644
--- a/rules/recording_test.go
+++ b/rules/recording_test.go
@@ -49,7
+49,6 @@ func TestRuleEval(t *testing.T) { name string expr parser.Expr labels labels.Labels - limit int result promql.Vector err string }{ @@ -71,38 +70,11 @@ func TestRuleEval(t *testing.T) { Point: promql.Point{V: 1, T: timestamp.FromTime(now)}, }}, }, - { - name: "underlimit", - expr: &parser.NumberLiteral{Val: 1}, - labels: labels.FromStrings("foo", "bar"), - limit: 2, - result: promql.Vector{promql.Sample{ - Metric: labels.FromStrings("__name__", "underlimit", "foo", "bar"), - Point: promql.Point{V: 1, T: timestamp.FromTime(now)}, - }}, - }, - { - name: "atlimit", - expr: &parser.NumberLiteral{Val: 1}, - labels: labels.FromStrings("foo", "bar"), - limit: 1, - result: promql.Vector{promql.Sample{ - Metric: labels.FromStrings("__name__", "atlimit", "foo", "bar"), - Point: promql.Point{V: 1, T: timestamp.FromTime(now)}, - }}, - }, - { - name: "overlimit", - expr: &parser.NumberLiteral{Val: 1}, - labels: labels.FromStrings("foo", "bar"), - limit: -1, - err: "exceeded limit -1 with 1 samples", - }, } for _, test := range suite { rule := NewRecordingRule(test.name, test.expr, test.labels) - result, err := rule.Eval(ctx, now, EngineQueryFunc(engine, storage), nil, test.limit) + result, err := rule.Eval(ctx, now, EngineQueryFunc(engine, storage), nil, 0) if test.err == "" { require.NoError(t, err) } else { @@ -151,3 +123,52 @@ func TestRuleEvalDuplicate(t *testing.T) { require.Error(t, err) require.EqualError(t, err, "vector contains metrics with the same labelset after applying rule labels") } + +func TestRecordingRuleLimit(t *testing.T) { + suite, err := promql.NewTest(t, ` + load 1m + metric{label="1"} 1 + metric{label="2"} 1 + `) + require.NoError(t, err) + defer suite.Close() + + require.NoError(t, suite.Run()) + + tests := []struct { + limit int + err string + }{ + { + limit: 0, + }, + { + limit: -1, + }, + { + limit: 2, + }, + { + limit: 1, + err: "exceeded limit of 1 with 2 series", + }, + } + + expr, _ := parser.ParseExpr(`metric > 0`) + rule := NewRecordingRule( + "foo", + expr, + labels.FromStrings("test", "test"), + ) + + evalTime := time.Unix(0, 0) + + for _, test := range tests { + _, err := rule.Eval(suite.Context(), evalTime, EngineQueryFunc(suite.QueryEngine(), suite.Storage()), nil, test.limit) + if err != nil { + require.EqualError(t, err, test.err) + } else if test.err != "" { + t.Errorf("Expected error %s, got none", test.err) + } + } +} From 97b07388953b860380a95e7d3e72fca8f733842a Mon Sep 17 00:00:00 2001 From: Will Tran Date: Thu, 21 Oct 2021 17:28:37 -0400 Subject: [PATCH 018/161] add --max-block-duration in promtool create-blocks-from rules (#9511) * support maxBlockDuration for promtool tsdb create-blocks-from rules Fixes #9465 Signed-off-by: Will Tran * don't hardcode 2h as the default block size in rules test Signed-off-by: Will Tran --- cmd/promtool/backfill.go | 7 ++++++- cmd/promtool/main.go | 13 +++++++------ cmd/promtool/rules.go | 18 ++++++++++-------- cmd/promtool/rules_test.go | 36 +++++++++++++++++++++--------------- 4 files changed, 44 insertions(+), 30 deletions(-) diff --git a/cmd/promtool/backfill.go b/cmd/promtool/backfill.go index 3d05260e30..db6eddcd64 100644 --- a/cmd/promtool/backfill.go +++ b/cmd/promtool/backfill.go @@ -66,7 +66,7 @@ func getMinAndMaxTimestamps(p textparse.Parser) (int64, int64, error) { return maxt, mint, nil } -func createBlocks(input []byte, mint, maxt, maxBlockDuration int64, maxSamplesInAppender int, outputDir string, humanReadable, quiet bool) (returnErr error) { +func getCompatibleBlockDuration(maxBlockDuration int64) 
int64 { blockDuration := tsdb.DefaultBlockDuration if maxBlockDuration > tsdb.DefaultBlockDuration { ranges := tsdb.ExponentialBlockRanges(tsdb.DefaultBlockDuration, 10, 3) @@ -79,6 +79,11 @@ func createBlocks(input []byte, mint, maxt, maxBlockDuration int64, maxSamplesIn } blockDuration = ranges[idx] } + return blockDuration +} + +func createBlocks(input []byte, mint, maxt, maxBlockDuration int64, maxSamplesInAppender int, outputDir string, humanReadable, quiet bool) (returnErr error) { + blockDuration := getCompatibleBlockDuration(maxBlockDuration) mint = blockDuration * (mint / blockDuration) db, err := tsdb.OpenDBReadOnly(outputDir, nil) diff --git a/cmd/promtool/main.go b/cmd/promtool/main.go index ae46d84e58..22e7d16ff8 100644 --- a/cmd/promtool/main.go +++ b/cmd/promtool/main.go @@ -250,7 +250,7 @@ func main() { os.Exit(backfillOpenMetrics(*importFilePath, *importDBPath, *importHumanReadable, *importQuiet, *maxBlockDuration)) case importRulesCmd.FullCommand(): - os.Exit(checkErr(importRules(*importRulesURL, *importRulesStart, *importRulesEnd, *importRulesOutputDir, *importRulesEvalInterval, *importRulesFiles...))) + os.Exit(checkErr(importRules(*importRulesURL, *importRulesStart, *importRulesEnd, *importRulesOutputDir, *importRulesEvalInterval, *maxBlockDuration, *importRulesFiles...))) } } @@ -927,7 +927,7 @@ func (j *jsonPrinter) printLabelValues(v model.LabelValues) { // importRules backfills recording rules from the files provided. The output are blocks of data // at the outputDir location. -func importRules(url *url.URL, start, end, outputDir string, evalInterval time.Duration, files ...string) error { +func importRules(url *url.URL, start, end, outputDir string, evalInterval time.Duration, maxBlockDuration time.Duration, files ...string) error { ctx := context.Background() var stime, etime time.Time var err error @@ -950,10 +950,11 @@ func importRules(url *url.URL, start, end, outputDir string, evalInterval time.D } cfg := ruleImporterConfig{ - outputDir: outputDir, - start: stime, - end: etime, - evalInterval: evalInterval, + outputDir: outputDir, + start: stime, + end: etime, + evalInterval: evalInterval, + maxBlockDuration: maxBlockDuration, } client, err := api.NewClient(api.Config{ Address: url.String(), diff --git a/cmd/promtool/rules.go b/cmd/promtool/rules.go index 642cc1ed0d..2c35d01051 100644 --- a/cmd/promtool/rules.go +++ b/cmd/promtool/rules.go @@ -48,10 +48,11 @@ type ruleImporter struct { } type ruleImporterConfig struct { - outputDir string - start time.Time - end time.Time - evalInterval time.Duration + outputDir string + start time.Time + end time.Time + evalInterval time.Duration + maxBlockDuration time.Duration } // newRuleImporter creates a new rule importer that can be used to parse and evaluate recording rule files and create new series @@ -83,7 +84,7 @@ func (importer *ruleImporter) importAll(ctx context.Context) (errs []error) { for i, r := range group.Rules() { level.Info(importer.logger).Log("backfiller", "processing rule", "id", i, "name", r.Name()) - if err := importer.importRule(ctx, r.Query().String(), r.Name(), r.Labels(), importer.config.start, importer.config.end, group); err != nil { + if err := importer.importRule(ctx, r.Query().String(), r.Name(), r.Labels(), importer.config.start, importer.config.end, int64(importer.config.maxBlockDuration/time.Millisecond), group); err != nil { errs = append(errs, err) } } @@ -92,8 +93,9 @@ func (importer *ruleImporter) importAll(ctx context.Context) (errs []error) { } // importRule queries a prometheus 
API to evaluate rules at times in the past. -func (importer *ruleImporter) importRule(ctx context.Context, ruleExpr, ruleName string, ruleLabels labels.Labels, start, end time.Time, grp *rules.Group) (err error) { - blockDuration := tsdb.DefaultBlockDuration +func (importer *ruleImporter) importRule(ctx context.Context, ruleExpr, ruleName string, ruleLabels labels.Labels, start, end time.Time, + maxBlockDuration int64, grp *rules.Group) (err error) { + blockDuration := getCompatibleBlockDuration(maxBlockDuration) startInMs := start.Unix() * int64(time.Second/time.Millisecond) endInMs := end.Unix() * int64(time.Second/time.Millisecond) @@ -130,7 +132,7 @@ func (importer *ruleImporter) importRule(ctx context.Context, ruleExpr, ruleName // also need to append samples throughout the whole block range. To allow that, we // pretend that the block is twice as large here, but only really add sample in the // original interval later. - w, err := tsdb.NewBlockWriter(log.NewNopLogger(), importer.config.outputDir, 2*tsdb.DefaultBlockDuration) + w, err := tsdb.NewBlockWriter(log.NewNopLogger(), importer.config.outputDir, 2*blockDuration) if err != nil { return errors.Wrap(err, "new block writer") } diff --git a/cmd/promtool/rules_test.go b/cmd/promtool/rules_test.go index c81caaa164..685e4bd11b 100644 --- a/cmd/promtool/rules_test.go +++ b/cmd/promtool/rules_test.go @@ -38,6 +38,8 @@ func (mockAPI mockQueryRangeAPI) QueryRange(ctx context.Context, query string, r return mockAPI.samples, v1.Warnings{}, nil } +const defaultBlockDuration = time.Duration(tsdb.DefaultBlockDuration) * time.Millisecond + // TestBackfillRuleIntegration is an integration test that runs all the rule importer code to confirm the parts work together. func TestBackfillRuleIntegration(t *testing.T) { const ( @@ -46,23 +48,26 @@ func TestBackfillRuleIntegration(t *testing.T) { testValue2 = 98 ) var ( - start = time.Date(2009, time.November, 10, 6, 34, 0, 0, time.UTC) - testTime = model.Time(start.Add(-9 * time.Hour).Unix()) - testTime2 = model.Time(start.Add(-8 * time.Hour).Unix()) + start = time.Date(2009, time.November, 10, 6, 34, 0, 0, time.UTC) + testTime = model.Time(start.Add(-9 * time.Hour).Unix()) + testTime2 = model.Time(start.Add(-8 * time.Hour).Unix()) + twentyFourHourDuration, _ = time.ParseDuration("24h") ) var testCases = []struct { name string runcount int + maxBlockDuration time.Duration expectedBlockCount int expectedSeriesCount int expectedSampleCount int samples []*model.SampleStream }{ - {"no samples", 1, 0, 0, 0, []*model.SampleStream{}}, - {"run importer once", 1, 8, 4, 4, []*model.SampleStream{{Metric: model.Metric{"name1": "val1"}, Values: []model.SamplePair{{Timestamp: testTime, Value: testValue}}}}}, - {"run importer with dup name label", 1, 8, 4, 4, []*model.SampleStream{{Metric: model.Metric{"__name__": "val1", "name1": "val1"}, Values: []model.SamplePair{{Timestamp: testTime, Value: testValue}}}}}, - {"one importer twice", 2, 8, 4, 8, []*model.SampleStream{{Metric: model.Metric{"name1": "val1"}, Values: []model.SamplePair{{Timestamp: testTime, Value: testValue}, {Timestamp: testTime2, Value: testValue2}}}}}, + {"no samples", 1, defaultBlockDuration, 0, 0, 0, []*model.SampleStream{}}, + {"run importer once", 1, defaultBlockDuration, 8, 4, 4, []*model.SampleStream{{Metric: model.Metric{"name1": "val1"}, Values: []model.SamplePair{{Timestamp: testTime, Value: testValue}}}}}, + {"run importer with dup name label", 1, defaultBlockDuration, 8, 4, 4, []*model.SampleStream{{Metric: model.Metric{"__name__": 
"val1", "name1": "val1"}, Values: []model.SamplePair{{Timestamp: testTime, Value: testValue}}}}}, + {"one importer twice", 2, defaultBlockDuration, 8, 4, 8, []*model.SampleStream{{Metric: model.Metric{"name1": "val1"}, Values: []model.SamplePair{{Timestamp: testTime, Value: testValue}, {Timestamp: testTime2, Value: testValue2}}}}}, + {"run importer once with larger blocks", 1, twentyFourHourDuration, 4, 4, 4, []*model.SampleStream{{Metric: model.Metric{"name1": "val1"}, Values: []model.SamplePair{{Timestamp: testTime, Value: testValue}}}}}, } for _, tt := range testCases { t.Run(tt.name, func(t *testing.T) { @@ -76,7 +81,8 @@ func TestBackfillRuleIntegration(t *testing.T) { // Execute the test more than once to simulate running the rule importer twice with the same data. // We expect duplicate blocks with the same series are created when run more than once. for i := 0; i < tt.runcount; i++ { - ruleImporter, err := newTestRuleImporter(ctx, start, tmpDir, tt.samples) + + ruleImporter, err := newTestRuleImporter(ctx, start, tmpDir, tt.samples, tt.maxBlockDuration) require.NoError(t, err) path1 := filepath.Join(tmpDir, "test.file") require.NoError(t, createSingleRuleTestFiles(path1)) @@ -162,13 +168,14 @@ func TestBackfillRuleIntegration(t *testing.T) { } } -func newTestRuleImporter(ctx context.Context, start time.Time, tmpDir string, testSamples model.Matrix) (*ruleImporter, error) { +func newTestRuleImporter(ctx context.Context, start time.Time, tmpDir string, testSamples model.Matrix, maxBlockDuration time.Duration) (*ruleImporter, error) { logger := log.NewNopLogger() cfg := ruleImporterConfig{ - outputDir: tmpDir, - start: start.Add(-10 * time.Hour), - end: start.Add(-7 * time.Hour), - evalInterval: 60 * time.Second, + outputDir: tmpDir, + start: start.Add(-10 * time.Hour), + end: start.Add(-7 * time.Hour), + evalInterval: 60 * time.Second, + maxBlockDuration: maxBlockDuration, } return newRuleImporter(logger, cfg, mockQueryRangeAPI{ @@ -225,8 +232,7 @@ func TestBackfillLabels(t *testing.T) { Values: []model.SamplePair{{Timestamp: model.TimeFromUnixNano(start.UnixNano()), Value: 123}}, }, } - ruleImporter, err := newTestRuleImporter(ctx, start, tmpDir, mockAPISamples) - require.NoError(t, err) + ruleImporter, err := newTestRuleImporter(ctx, start, tmpDir, mockAPISamples, defaultBlockDuration) path := filepath.Join(tmpDir, "test.file") recordingRules := `groups: From 92ba644a3799b6b115bf30bdef99d9e09321112d Mon Sep 17 00:00:00 2001 From: Ben Kochie Date: Thu, 21 Oct 2021 23:36:13 +0200 Subject: [PATCH 019/161] Bump indirect modules (#9562) Update to current versions for indirect modules. 
Signed-off-by: SuperQ --- go.mod | 4 ++-- go.sum | 29 ++++++++++++++++++++--------- 2 files changed, 22 insertions(+), 11 deletions(-) diff --git a/go.mod b/go.mod index 32d29cd72a..e083df3825 100644 --- a/go.mod +++ b/go.mod @@ -11,7 +11,7 @@ require ( github.com/alecthomas/units v0.0.0-20210927113745-59d0afb8317a github.com/aws/aws-sdk-go v1.41.7 github.com/cespare/xxhash/v2 v2.1.2 - github.com/containerd/containerd v1.5.4 // indirect + github.com/containerd/containerd v1.5.7 // indirect github.com/dennwc/varint v1.0.0 github.com/dgryski/go-sip13 v0.0.0-20200911182023-62edffca9245 github.com/digitalocean/godo v1.69.1 @@ -36,7 +36,7 @@ require ( github.com/kolo/xmlrpc v0.0.0-20201022064351-38db28db192b github.com/linode/linodego v1.1.0 github.com/miekg/dns v1.1.43 - github.com/moby/term v0.0.0-20201216013528-df9cb8a40635 // indirect + github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 // indirect github.com/morikuni/aec v1.0.0 // indirect github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f github.com/oklog/run v1.1.0 diff --git a/go.sum b/go.sum index e5e134cbba..eaa5edc9ac 100644 --- a/go.sum +++ b/go.sum @@ -53,8 +53,9 @@ github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9mo github.com/Azure/azure-sdk-for-go v41.3.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-sdk-for-go v58.2.0+incompatible h1:iCb2tuoEm3N7ZpUDOvu1Yxl1B3iOVDmaD6weaRuIPzs= github.com/Azure/azure-sdk-for-go v58.2.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= +github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= +github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= @@ -125,7 +126,7 @@ github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg3 github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2ow3VK6a9Lg= github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00= github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600= -github.com/Microsoft/hcsshim v0.8.18/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4= +github.com/Microsoft/hcsshim v0.8.21/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4= github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU= github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= @@ -198,6 +199,7 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= 
github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA= +github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= @@ -223,6 +225,7 @@ github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XL github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw= +github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= @@ -230,6 +233,7 @@ github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmE github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLIdUjrmSXlK9pkrsDlLHbO8jiB8X8JnOc= github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= +github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= @@ -273,8 +277,8 @@ github.com/containerd/containerd v1.5.0-beta.3/go.mod h1:/wr9AVtEM7x9c+n0+stptlo github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09ZvgqEq8EfBp/m3lcVZIvPHhI= github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoTJseu1FGOKuoA4nNb2s= github.com/containerd/containerd v1.5.1/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g= -github.com/containerd/containerd v1.5.4 h1:uPF0og3ByFzDnaStfiQj3fVGTEtaSNyU+bW7GR/nqGA= -github.com/containerd/containerd v1.5.4/go.mod h1:sx18RgvW6ABJ4iYUw7Q5x7bgFOAB9B6G7+yO0XBc4zw= +github.com/containerd/containerd v1.5.7 h1:rQyoYtj4KddB3bxG6SAqd4+08gePNyJjRqvOIfV3rkM= +github.com/containerd/containerd v1.5.7/go.mod h1:gyvv6+ugqY25TiXxcZC3L5yOeYgEw0QMhscqVp1AR9c= github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= @@ -336,6 +340,7 @@ github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7 github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd/v22 v22.0.0/go.mod 
h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= +github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= @@ -603,6 +608,7 @@ github.com/godbus/dbus v0.0.0-20151105175453-c7fdd8b5cd55/go.mod h1:/YcGZj5zSblf github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gofrs/uuid v3.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= @@ -823,8 +829,9 @@ github.com/imdario/mergo v0.3.4/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJ github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= -github.com/imdario/mergo v0.3.11 h1:3tnifQM4i+fbajXKBHXWEH+KvNHqojZ778UH75j3bGA= github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= +github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/influxdata/flux v0.65.0/go.mod h1:BwN2XG2lMszOoquQaFdPET8FRQfrXiZsWmcMO9rkaVY= github.com/influxdata/flux v0.131.0/go.mod h1:CKvnYe6FHpTj/E0YGI7TcOZdGiYHoToOPSnoa12RtKI= @@ -990,8 +997,8 @@ github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2J github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ= github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo= -github.com/moby/term v0.0.0-20201216013528-df9cb8a40635 h1:rzf0wL0CHVc8CEsgyygG0Mn9CNCCPZqOPaz8RiiHYQk= -github.com/moby/term v0.0.0-20201216013528-df9cb8a40635/go.mod h1:FBS0z0QWA44HXygs7VXDUOGoN/1TV3RuWkLO04am3wc= +github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 h1:dcztxKSvZ4Id8iPpHERQBbIJfabdt4wUm5qy3wOL2Zc= +github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ 
-1062,14 +1069,17 @@ github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59P github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0= +github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0= github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs= github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE= github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo= +github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8= github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= github.com/opentracing-contrib/go-stdlib v0.0.0-20190519235532-cf7a6c988dc9/go.mod h1:PLldrQSroqzH70Xl+1DQcGnefIbqsKR7UDaiux3zV+w= github.com/opentracing-contrib/go-stdlib v1.0.0 h1:TBS7YuVotp8myLon4Pv7BtCBzOTo1DeZCld0Z63mW2w= @@ -1215,8 +1225,9 @@ github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPx github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= -github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= +github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= @@ -1618,7 +1629,6 @@ golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200817155316-9781c653f443/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200826173525-f9321e4c35a6/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys 
v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1640,6 +1650,7 @@ golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= From 5b75a97b2ce42fa712073fc0a297691977f9dbb0 Mon Sep 17 00:00:00 2001 From: SuperQ Date: Fri, 22 Oct 2021 10:32:03 +0200 Subject: [PATCH 020/161] Use download mode for update-go-deps Add `-d` to go get to avoid trying to build/install packages when running `make update-go-deps`. Tested with 1.14. Signed-off-by: SuperQ --- Makefile.common | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile.common b/Makefile.common index 99e8f9f1b3..ed7d1826e4 100644 --- a/Makefile.common +++ b/Makefile.common @@ -160,7 +160,7 @@ endif update-go-deps: @echo ">> updating Go dependencies" @for m in $$($(GO) list -mod=readonly -m -f '{{ if and (not .Indirect) (not .Main)}}{{.Path}}{{end}}' all); do \ - $(GO) get $$m; \ + $(GO) get -d $$m; \ done GO111MODULE=$(GO111MODULE) $(GO) mod tidy ifneq (,$(wildcard vendor)) From 21834bca6b5e44566602ea9315c8088dd82e5fad Mon Sep 17 00:00:00 2001 From: Julien Pivotto Date: Fri, 22 Oct 2021 16:39:48 +0200 Subject: [PATCH 021/161] Release 2.31.0-rc.0 (#9556) * Release 2.31.0-rc.0 Signed-off-by: Julien Pivotto --- CHANGELOG.md | 24 ++++++++++++++++++++++++ VERSION | 2 +- 2 files changed, 25 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 940e6694d9..c79ecd8a17 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,27 @@ +## 2.31.0-rc.0 / 2021-10-21 + +* [FEATURE] PromQL: Add trigonometric functions and `atan2` binary operator. #9239 #9248 #9515 +* [FEATURE] Remote: Add support for exemplar in the remote write receiver endpoint. #9319 #9414 +* [FEATURE] SD: Add PuppetDB service discovery. #8883 +* [FEATURE] SD: Add Uyuni service discovery. #8190 +* [FEATURE] Web: Add support for security-related HTTP headers. #9546 +* [ENHANCEMENT] Azure SD: Add `proxy_url`, `follow_redirects`, `tls_config`. #9267 +* [ENHANCEMENT] Backfill: Add `--max-block-duration` in `promtool create-blocks-from rules`. #9511 +* [ENHANCEMENT] Config: Print human-readable sizes with unit instead of raw numbers. #9361 +* [ENHANCEMENT] HTTP: Re-enable HTTP/2. #9398 +* [ENHANCEMENT] Kubernetes SD: Warn user if number of endpoints exceeds limit. #9467 +* [ENHANCEMENT] OAuth2: Add TLS configuration to token requests. #9550 +* [ENHANCEMENT] PromQL: Several optimizations. 
#9365 #9360 #9362 #9552 +* [ENHANCEMENT] PromQL: Make aggregations deterministic in instant queries. #9459 +* [ENHANCEMENT] Rules: Add the ability to limit number of alerts or series. #9260 #9541 +* [ENHANCEMENT] SD: Experimental discovery manager to avoid restarts upon reload. Disabled by default, enable with flag `--enable-feature=new-service-discovery-manager`. #9349 #9537 +* [ENHANCEMENT] UI: Debounce timerange setting changes. #9359 +* [BUGFIX] Backfill: Apply rule labels after query labels. #9421 +* [BUGFIX] Scrape: Resolve conflicts between multiple exported label prefixes. #9479 #9518 +* [BUGFIX] Scrape: Restart scrape loops when `__scrape_interval__` is changed. #9551 +* [BUGFIX] TSDB: Fix memory leak in samples deletion. #9151 +* [BUGFIX] UI: Use consistent margin-bottom for all alert kinds. #9318 + ## 2.30.3 / 2021-10-05 * [BUGFIX] TSDB: Fix panic on failed snapshot replay. #9438 diff --git a/VERSION b/VERSION index e88ba89bab..1fe977a4da 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.30.3 +2.31.0-rc.0 From 1c624c58ca934f618be737b4995e22051f5724c1 Mon Sep 17 00:00:00 2001 From: Ben Kochie Date: Sat, 23 Oct 2021 00:36:59 +0200 Subject: [PATCH 022/161] Replace deprecated linter (#9571) Upstream is replacing `golint` with `revive`. Signed-off-by: SuperQ --- .golangci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.golangci.yml b/.golangci.yml index 642cf45c86..f9d72c01e2 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -4,7 +4,7 @@ run: linters: enable: - depguard - - golint + - revive issues: exclude-rules: From 73255e15f6b46affc9df86fa202e5ad363f4582b Mon Sep 17 00:00:00 2001 From: Julien Pivotto Date: Sat, 23 Oct 2021 00:45:29 +0200 Subject: [PATCH 023/161] Address golint failures from revive Signed-off-by: Julien Pivotto --- .github/workflows/golangci-lint.yml | 2 ++ cmd/promtool/rules.go | 2 +- cmd/promtool/rules_test.go | 1 + discovery/triton/triton.go | 4 ++-- tsdb/errors/errors.go | 2 +- tsdb/exemplar.go | 2 +- tsdb/wal/wal.go | 4 ++-- 7 files changed, 10 insertions(+), 7 deletions(-) diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml index 431fef711f..f96c76a659 100644 --- a/.github/workflows/golangci-lint.yml +++ b/.github/workflows/golangci-lint.yml @@ -7,6 +7,7 @@ on: - "**.go" - "scripts/errcheck_excludes.txt" - ".github/workflows/golangci-lint.yml" + - ".golangci.yml" pull_request: paths: - "go.sum" @@ -14,6 +15,7 @@ on: - "**.go" - "scripts/errcheck_excludes.txt" - ".github/workflows/golangci-lint.yml" + - ".golangci.yml" jobs: golangci: diff --git a/cmd/promtool/rules.go b/cmd/promtool/rules.go index 2c35d01051..ff98e80d8a 100644 --- a/cmd/promtool/rules.go +++ b/cmd/promtool/rules.go @@ -170,7 +170,7 @@ func (importer *ruleImporter) importRule(ctx context.Context, ruleExpr, ruleName } } default: - return errors.New(fmt.Sprintf("rule result is wrong type %s", val.Type().String())) + return fmt.Errorf("rule result is wrong type %s", val.Type().String()) } if err := app.flushAndCommit(ctx); err != nil { diff --git a/cmd/promtool/rules_test.go b/cmd/promtool/rules_test.go index 685e4bd11b..d1f8938b98 100644 --- a/cmd/promtool/rules_test.go +++ b/cmd/promtool/rules_test.go @@ -233,6 +233,7 @@ func TestBackfillLabels(t *testing.T) { }, } ruleImporter, err := newTestRuleImporter(ctx, start, tmpDir, mockAPISamples, defaultBlockDuration) + require.NoError(t, err) path := filepath.Join(tmpDir, "test.file") recordingRules := `groups: diff --git a/discovery/triton/triton.go b/discovery/triton/triton.go index 
187d31ab63..96b9f39b1f 100644 --- a/discovery/triton/triton.go +++ b/discovery/triton/triton.go @@ -188,7 +188,7 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) { case "cn": endpointFormat = "https://%s:%d/v%d/gz/discover" default: - return nil, errors.New(fmt.Sprintf("unknown role '%s' in configuration", d.sdConfig.Role)) + return nil, fmt.Errorf("unknown role '%s' in configuration", d.sdConfig.Role) } var endpoint = fmt.Sprintf(endpointFormat, d.sdConfig.Endpoint, d.sdConfig.Port, d.sdConfig.Version) if len(d.sdConfig.Groups) > 0 { @@ -223,7 +223,7 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) { case "cn": return d.processComputeNodeResponse(data, endpoint) default: - return nil, errors.New(fmt.Sprintf("unknown role '%s' in configuration", d.sdConfig.Role)) + return nil, fmt.Errorf("unknown role '%s' in configuration", d.sdConfig.Role) } } diff --git a/tsdb/errors/errors.go b/tsdb/errors/errors.go index aeac4d2771..607a7782a2 100644 --- a/tsdb/errors/errors.go +++ b/tsdb/errors/errors.go @@ -24,7 +24,7 @@ import ( type multiError []error // NewMulti returns multiError with provided errors added if not nil. -func NewMulti(errs ...error) multiError { // nolint:golint +func NewMulti(errs ...error) multiError { // nolint:revive m := multiError{} m.Add(errs...) return m diff --git a/tsdb/exemplar.go b/tsdb/exemplar.go index 1e09da21e1..086e696675 100644 --- a/tsdb/exemplar.go +++ b/tsdb/exemplar.go @@ -284,7 +284,7 @@ func (ce *CircularExemplarStorage) Resize(l int64) int { // This math is essentially looking at nextIndex, where we would write the next exemplar to, // and find the index in the old exemplar buffer that we should start migrating exemplars from. // This way we don't migrate exemplars that would just be overwritten when migrating later exemplars. - var startIndex int64 = (oldNextIndex - count + int64(len(oldBuffer))) % int64(len(oldBuffer)) + var startIndex = (oldNextIndex - count + int64(len(oldBuffer))) % int64(len(oldBuffer)) for i := int64(0); i < count; i++ { idx := (startIndex + i) % int64(len(oldBuffer)) diff --git a/tsdb/wal/wal.go b/tsdb/wal/wal.go index c5023b0fdb..7254fc4c75 100644 --- a/tsdb/wal/wal.go +++ b/tsdb/wal/wal.go @@ -875,7 +875,7 @@ type segmentBufReader struct { off int // Offset of read data into current segment. } -// nolint:golint // TODO: Consider exporting segmentBufReader +// nolint:revive // TODO: Consider exporting segmentBufReader func NewSegmentBufReader(segs ...*Segment) *segmentBufReader { return &segmentBufReader{ buf: bufio.NewReaderSize(segs[0], 16*pageSize), @@ -883,7 +883,7 @@ func NewSegmentBufReader(segs ...*Segment) *segmentBufReader { } } -// nolint:golint +// nolint:revive func NewSegmentBufReaderWithOffset(offset int, segs ...*Segment) (sbr *segmentBufReader, err error) { if offset == 0 { return NewSegmentBufReader(segs...), nil From 3cd2c033e23265ee6f65d7ee6538ecf0bd0b6dfc Mon Sep 17 00:00:00 2001 From: SuperQ Date: Fri, 22 Oct 2021 23:15:33 +0200 Subject: [PATCH 024/161] Use Go 1.16+ install for mixin tests Use new `go install` syntax to fetch tools. 
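Since Go 1.16, `go install example.com/cmd@version` builds and installs a tool at an explicit version without touching the current module's `go.mod`, which is what allows the `tools.go` plus dedicated `go.mod`/`go.sum` pinning pattern to be deleted below.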
Signed-off-by: SuperQ --- .circleci/config.yml | 22 ++++------- documentation/prometheus-mixin/Makefile | 4 ++ documentation/prometheus-mixin/go.mod | 8 ---- documentation/prometheus-mixin/go.sum | 49 ------------------------- documentation/prometheus-mixin/tools.go | 26 ------------- 5 files changed, 11 insertions(+), 98 deletions(-) delete mode 100644 documentation/prometheus-mixin/go.mod delete mode 100644 documentation/prometheus-mixin/go.sum delete mode 100644 documentation/prometheus-mixin/tools.go diff --git a/.circleci/config.yml b/.circleci/config.yml index 01b2b10a1e..6c76f100b7 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -99,21 +99,13 @@ jobs: steps: - checkout - run: go install ./cmd/promtool/. - - run: - command: go install -mod=readonly github.com/google/go-jsonnet/cmd/jsonnet github.com/google/go-jsonnet/cmd/jsonnetfmt github.com/jsonnet-bundler/jsonnet-bundler/cmd/jb - working_directory: ~/project/documentation/prometheus-mixin - - run: - command: make clean - working_directory: ~/project/documentation/prometheus-mixin - - run: - command: jb install - working_directory: ~/project/documentation/prometheus-mixin - - run: - command: make - working_directory: ~/project/documentation/prometheus-mixin - - run: - command: git diff --exit-code - working_directory: ~/project/documentation/prometheus-mixin + - run: go install github.com/google/go-jsonnet/cmd/jsonnet@latest + - run: go install github.com/google/go-jsonnet/cmd/jsonnetfmt@latest + - run: go install github.com/jsonnet-bundler/jsonnet-bundler/cmd/jb@latest + - run: make -C documentation/prometheus-mixin clean + - run: make -C documentation/prometheus-mixin jb_install + - run: make -C documentation/prometheus-mixin + - run: git diff --exit-code repo_sync: executor: golang diff --git a/documentation/prometheus-mixin/Makefile b/documentation/prometheus-mixin/Makefile index 9ade5aa2b6..c3023274c8 100644 --- a/documentation/prometheus-mixin/Makefile +++ b/documentation/prometheus-mixin/Makefile @@ -21,5 +21,9 @@ lint: prometheus_alerts.yaml promtool check rules prometheus_alerts.yaml +.PHONY: jb_install +jb_install: + jb install + clean: rm -rf dashboards_out prometheus_alerts.yaml diff --git a/documentation/prometheus-mixin/go.mod b/documentation/prometheus-mixin/go.mod deleted file mode 100644 index 47b10d80df..0000000000 --- a/documentation/prometheus-mixin/go.mod +++ /dev/null @@ -1,8 +0,0 @@ -module github.com/prometheus/prometheus/documentation/prometheus-mixin - -go 1.15 - -require ( - github.com/google/go-jsonnet v0.16.0 - github.com/jsonnet-bundler/jsonnet-bundler v0.4.0 -) diff --git a/documentation/prometheus-mixin/go.sum b/documentation/prometheus-mixin/go.sum deleted file mode 100644 index a6c4a6ee1a..0000000000 --- a/documentation/prometheus-mixin/go.sum +++ /dev/null @@ -1,49 +0,0 @@ -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc h1:cAKDfWh5VpdgMhJosfJnn5/FoN2SRZ4p7fJNX58YPaU= -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf h1:qet1QNfXsQxTZqLG4oE62mJzwPIB8+Tee4RNCL9ulrY= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/campoy/embedmd v1.0.0/go.mod h1:oxyr9RCiSXg0M3VJ3ks0UGfp98BpSSGr0kpiX3MzVl8= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 
h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s= -github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= -github.com/google/go-jsonnet v0.16.0 h1:Nb4EEOp+rdeGGyB1rQ5eisgSAqrTnhf9ip+X6lzZbY0= -github.com/google/go-jsonnet v0.16.0/go.mod h1:sOcuej3UW1vpPTZOr8L7RQimqai1a57bt5j22LzGZCw= -github.com/jsonnet-bundler/jsonnet-bundler v0.4.0 h1:4BKZ6LDqPc2wJDmaKnmYD/vDjUptJtnUpai802MibFc= -github.com/jsonnet-bundler/jsonnet-bundler v0.4.0/go.mod h1:/by7P/OoohkI3q4CgSFqcoFsVY+IaNbzOVDknEsKDeU= -github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-colorable v0.1.4 h1:snbPLB8fVfU9iwbbo30TPtbLRzwWu6aJS6Xh4eaaviA= -github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= -github.com/mattn/go-isatty v0.0.6/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.11 h1:FxPOTFNqGkuDUGi3H/qkUbQO4ZiBa2brKq5r0l8TGeM= -github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= -github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0= -github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190310054646-10058d7d4faa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191026070338-33540a1f6037 h1:YyJpGZS1sBuBCzLAR1VEpK193GlqGZbnPFnPV/5Rsb4= -golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc= -gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod 
h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= -gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/documentation/prometheus-mixin/tools.go b/documentation/prometheus-mixin/tools.go deleted file mode 100644 index 1115bb9534..0000000000 --- a/documentation/prometheus-mixin/tools.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2020 The prometheus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build tools -// +build tools - -// Package tools tracks dependencies for tools that used in the build process. -// See https://github.com/golang/go/issues/25922 -package tools - -import ( - _ "github.com/google/go-jsonnet/cmd/jsonnet" - _ "github.com/google/go-jsonnet/cmd/jsonnetfmt" - _ "github.com/jsonnet-bundler/jsonnet-bundler/cmd/jb" -) From 5d409b063761604d97d4cd899cbb47e6b9b4a85c Mon Sep 17 00:00:00 2001 From: Levi Harrison Date: Sun, 24 Oct 2021 10:38:21 -0400 Subject: [PATCH 025/161] Remove `interval` and `timeout` parameters (#9578) --- scrape/scrape.go | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/scrape/scrape.go b/scrape/scrape.go index 626ffc11f5..a6c4409737 100644 --- a/scrape/scrape.go +++ b/scrape/scrape.go @@ -1200,7 +1200,7 @@ mainLoop: } } - last = sl.scrapeAndReport(sl.interval, sl.timeout, last, scrapeTime, errc) + last = sl.scrapeAndReport(last, scrapeTime, errc) select { case <-sl.parentCtx.Done(): @@ -1224,12 +1224,12 @@ mainLoop: // In the happy scenario, a single appender is used. // This function uses sl.parentCtx instead of sl.ctx on purpose. A scrape should // only be cancelled on shutdown, not on reloads. -func (sl *scrapeLoop) scrapeAndReport(interval, timeout time.Duration, last, appendTime time.Time, errc chan<- error) time.Time { +func (sl *scrapeLoop) scrapeAndReport(last, appendTime time.Time, errc chan<- error) time.Time { start := time.Now() // Only record after the first scrape. 
if !last.IsZero() { - targetIntervalLength.WithLabelValues(interval.String()).Observe( + targetIntervalLength.WithLabelValues(sl.interval.String()).Observe( time.Since(last).Seconds(), ) } @@ -1254,7 +1254,7 @@ func (sl *scrapeLoop) scrapeAndReport(interval, timeout time.Duration, last, app }() defer func() { - if err = sl.report(app, appendTime, timeout, time.Since(start), total, added, seriesAdded, scrapeErr); err != nil { + if err = sl.report(app, appendTime, time.Since(start), total, added, seriesAdded, scrapeErr); err != nil { level.Warn(sl.l).Log("msg", "Appending scrape report failed", "err", err) } }() @@ -1275,7 +1275,7 @@ func (sl *scrapeLoop) scrapeAndReport(interval, timeout time.Duration, last, app } var contentType string - scrapeCtx, cancel := context.WithTimeout(sl.parentCtx, timeout) + scrapeCtx, cancel := context.WithTimeout(sl.parentCtx, sl.timeout) contentType, scrapeErr = sl.scraper.scrape(scrapeCtx, buf) cancel() @@ -1647,7 +1647,7 @@ const ( scrapeSampleLimitMetricName = "scrape_sample_limit" + "\xff" ) -func (sl *scrapeLoop) report(app storage.Appender, start time.Time, timeout, duration time.Duration, scraped, added, seriesAdded int, scrapeErr error) (err error) { +func (sl *scrapeLoop) report(app storage.Appender, start time.Time, duration time.Duration, scraped, added, seriesAdded int, scrapeErr error) (err error) { sl.scraper.Report(start, duration, scrapeErr) ts := timestamp.FromTime(start) @@ -1673,7 +1673,7 @@ func (sl *scrapeLoop) report(app storage.Appender, start time.Time, timeout, dur return } if sl.reportScrapeTimeout { - if err = sl.addReportSample(app, scrapeTimeoutMetricName, ts, timeout.Seconds()); err != nil { + if err = sl.addReportSample(app, scrapeTimeoutMetricName, ts, sl.timeout.Seconds()); err != nil { return } if err = sl.addReportSample(app, scrapeSampleLimitMetricName, ts, float64(sl.sampleLimit)); err != nil { From a6e6011d55ed913cb8e53968cead9a6a6fd84911 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Furkan=20T=C3=BCrkal?= Date: Mon, 25 Oct 2021 00:45:31 +0300 Subject: [PATCH 026/161] Add scrape_body_size_bytes metric (#9569) Fixes #9520 Signed-off-by: Furkan --- docs/feature_flags.md | 1 + scrape/scrape.go | 45 +++++++++++++++++++++++++++---------------- 2 files changed, 29 insertions(+), 17 deletions(-) diff --git a/docs/feature_flags.md b/docs/feature_flags.md index 8a8c9f70d4..adef7fc931 100644 --- a/docs/feature_flags.md +++ b/docs/feature_flags.md @@ -71,6 +71,7 @@ When enabled, for each instance scrape, Prometheus stores a sample in the follow - `scrape_timeout_seconds`. The configured `scrape_timeout` for a target. This allows you to measure each target to find out how close they are to timing out with `scrape_duration_seconds / scrape_timeout_seconds`. - `scrape_sample_limit`. The configured `sample_limit` for a target. This allows you to measure each target to find out how close they are to reaching the limit with `scrape_samples_post_metric_relabeling / scrape_sample_limit`. Note that `scrape_sample_limit` can be zero if there is no limit configured, which means that the query above can return `+Inf` for targets with no limit (as we divide by zero). If you want to query only for targets that do have a sample limit use this query: `scrape_samples_post_metric_relabeling / (scrape_sample_limit > 0)`. +- `scrape_body_size_bytes`. The uncompressed size of the most recent scrape response, if successful. Scrapes failing because `body_size_limit` is exceeded report `-1`, other scrape failures report `0`. 
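As a hedged summary of the accounting this patch wires into `scrapeAndReport` (the helper below is invented for illustration; `errBodySizeLimit` is the scrape package's existing sentinel error):

```go
// bodySizeForReport condenses the reporting rules above: a successful
// scrape reports the uncompressed body size, a scrape rejected for
// exceeding body_size_limit reports -1, and any other failure reports 0.
func bodySizeForReport(body []byte, scrapeErr error) int {
	if scrapeErr == nil {
		return len(body)
	}
	if errors.Is(scrapeErr, errBodySizeLimit) {
		return -1
	}
	return 0
}
```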
## New service discovery manager diff --git a/scrape/scrape.go b/scrape/scrape.go index a6c4409737..e981ad36a3 100644 --- a/scrape/scrape.go +++ b/scrape/scrape.go @@ -264,7 +264,7 @@ const maxAheadTime = 10 * time.Minute type labelsMutator func(labels.Labels) labels.Labels -func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, jitterSeed uint64, logger log.Logger, reportScrapeTimeout bool) (*scrapePool, error) { +func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, jitterSeed uint64, logger log.Logger, reportExtraMetrics bool) (*scrapePool, error) { targetScrapePools.Inc() if logger == nil { logger = log.NewNopLogger() @@ -313,7 +313,7 @@ func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, jitterSeed opts.labelLimits, opts.interval, opts.timeout, - reportScrapeTimeout, + reportExtraMetrics, ) } @@ -861,7 +861,7 @@ type scrapeLoop struct { disabledEndOfRunStalenessMarkers bool - reportScrapeTimeout bool + reportExtraMetrics bool } // scrapeCache tracks mappings of exposed metric strings to label sets and @@ -1122,7 +1122,7 @@ func newScrapeLoop(ctx context.Context, labelLimits *labelLimits, interval time.Duration, timeout time.Duration, - reportScrapeTimeout bool, + reportExtraMetrics bool, ) *scrapeLoop { if l == nil { l = log.NewNopLogger() @@ -1149,7 +1149,7 @@ func newScrapeLoop(ctx context.Context, labelLimits: labelLimits, interval: interval, timeout: timeout, - reportScrapeTimeout: reportScrapeTimeout, + reportExtraMetrics: reportExtraMetrics, } sl.ctx, sl.cancel = context.WithCancel(ctx) @@ -1238,7 +1238,7 @@ func (sl *scrapeLoop) scrapeAndReport(last, appendTime time.Time, errc chan<- er defer sl.buffers.Put(b) buf := bytes.NewBuffer(b) - var total, added, seriesAdded int + var total, added, seriesAdded, bytes int var err, appErr, scrapeErr error app := sl.appender(sl.parentCtx) @@ -1254,7 +1254,7 @@ func (sl *scrapeLoop) scrapeAndReport(last, appendTime time.Time, errc chan<- er }() defer func() { - if err = sl.report(app, appendTime, time.Since(start), total, added, seriesAdded, scrapeErr); err != nil { + if err = sl.report(app, appendTime, time.Since(start), total, added, seriesAdded, bytes, scrapeErr); err != nil { level.Warn(sl.l).Log("msg", "Appending scrape report failed", "err", err) } }() @@ -1287,11 +1287,15 @@ func (sl *scrapeLoop) scrapeAndReport(last, appendTime time.Time, errc chan<- er if len(b) > 0 { sl.lastScrapeSize = len(b) } + bytes = len(b) } else { level.Debug(sl.l).Log("msg", "Scrape failed", "err", scrapeErr) if errc != nil { errc <- scrapeErr } + if errors.Is(scrapeErr, errBodySizeLimit) { + bytes = -1 + } } // A failed scrape is the same as an empty scrape, @@ -1638,16 +1642,17 @@ func (sl *scrapeLoop) checkAddExemplarError(err error, e exemplar.Exemplar, appE // The constants are suffixed with the invalid \xff unicode rune to avoid collisions // with scraped metrics in the cache. 
const ( - scrapeHealthMetricName = "up" + "\xff" - scrapeDurationMetricName = "scrape_duration_seconds" + "\xff" - scrapeSamplesMetricName = "scrape_samples_scraped" + "\xff" - samplesPostRelabelMetricName = "scrape_samples_post_metric_relabeling" + "\xff" - scrapeSeriesAddedMetricName = "scrape_series_added" + "\xff" - scrapeTimeoutMetricName = "scrape_timeout_seconds" + "\xff" - scrapeSampleLimitMetricName = "scrape_sample_limit" + "\xff" + scrapeHealthMetricName = "up" + "\xff" + scrapeDurationMetricName = "scrape_duration_seconds" + "\xff" + scrapeSamplesMetricName = "scrape_samples_scraped" + "\xff" + samplesPostRelabelMetricName = "scrape_samples_post_metric_relabeling" + "\xff" + scrapeSeriesAddedMetricName = "scrape_series_added" + "\xff" + scrapeTimeoutMetricName = "scrape_timeout_seconds" + "\xff" + scrapeSampleLimitMetricName = "scrape_sample_limit" + "\xff" + scrapeBodySizeBytesMetricName = "scrape_body_size_bytes" + "\xff" ) -func (sl *scrapeLoop) report(app storage.Appender, start time.Time, duration time.Duration, scraped, added, seriesAdded int, scrapeErr error) (err error) { +func (sl *scrapeLoop) report(app storage.Appender, start time.Time, duration time.Duration, scraped, added, seriesAdded, bytes int, scrapeErr error) (err error) { sl.scraper.Report(start, duration, scrapeErr) ts := timestamp.FromTime(start) @@ -1672,13 +1677,16 @@ func (sl *scrapeLoop) report(app storage.Appender, start time.Time, duration tim if err = sl.addReportSample(app, scrapeSeriesAddedMetricName, ts, float64(seriesAdded)); err != nil { return } - if sl.reportScrapeTimeout { + if sl.reportExtraMetrics { if err = sl.addReportSample(app, scrapeTimeoutMetricName, ts, sl.timeout.Seconds()); err != nil { return } if err = sl.addReportSample(app, scrapeSampleLimitMetricName, ts, float64(sl.sampleLimit)); err != nil { return } + if err = sl.addReportSample(app, scrapeBodySizeBytesMetricName, ts, float64(bytes)); err != nil { + return + } } return } @@ -1703,13 +1711,16 @@ func (sl *scrapeLoop) reportStale(app storage.Appender, start time.Time) (err er if err = sl.addReportSample(app, scrapeSeriesAddedMetricName, ts, stale); err != nil { return } - if sl.reportScrapeTimeout { + if sl.reportExtraMetrics { if err = sl.addReportSample(app, scrapeTimeoutMetricName, ts, stale); err != nil { return } if err = sl.addReportSample(app, scrapeSampleLimitMetricName, ts, stale); err != nil { return } + if err = sl.addReportSample(app, scrapeBodySizeBytesMetricName, ts, stale); err != nil { + return + } } return } From e7a12ac80ac728b8c6844991e2ecc7ee9d91c4aa Mon Sep 17 00:00:00 2001 From: Julius Volz Date: Mon, 25 Oct 2021 12:52:13 +0200 Subject: [PATCH 027/161] Fix broken prefixed asset links in webpack build (#9586) * Fix broken prefixed asset links in webpack build Fixes https://github.com/prometheus/prometheus/issues/9585 Signed-off-by: Julius Volz * Use .env file for PUBLIC_URL npm build env var Signed-off-by: Julius Volz --- web/ui/react-app/.env | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 web/ui/react-app/.env diff --git a/web/ui/react-app/.env b/web/ui/react-app/.env new file mode 100644 index 0000000000..b4cad0c00b --- /dev/null +++ b/web/ui/react-app/.env @@ -0,0 +1,3 @@ +# This ensures that all links in the generated asset bundle will be relative, +# so that assets are loaded correctly even when a path prefix is used. +PUBLIC_URL=. 
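With `PUBLIC_URL=.`, Create React App rewrites asset references in the generated bundle to be relative (e.g. `./static/js/main.js` rather than `/static/js/main.js`; the filename here is illustrative), so the UI loads correctly when Prometheus is served under a prefix via `--web.route-prefix` or a sub-path in `--web.external-url`.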
From d5bfbe3114561a3329fc1bdda1d83e0133827220 Mon Sep 17 00:00:00 2001 From: Dieter Plaetinck Date: Mon, 25 Oct 2021 15:14:15 +0200 Subject: [PATCH 028/161] improve bstream comments and doc (#9560) * improve bstream comments and doc Signed-off-by: Dieter Plaetinck * feedback Signed-off-by: Dieter Plaetinck --- tsdb/README.md | 2 +- tsdb/chunkenc/bstream.go | 13 ++++++--- tsdb/chunkenc/xor.go | 8 ++++-- tsdb/docs/bstream.md | 62 ++++++++++++++++++++++++++++++++++++++++ 4 files changed, 78 insertions(+), 7 deletions(-) create mode 100644 tsdb/docs/bstream.md diff --git a/tsdb/README.md b/tsdb/README.md index 59f800c7ae..4ad8fb0f33 100644 --- a/tsdb/README.md +++ b/tsdb/README.md @@ -10,7 +10,7 @@ Based on the Gorilla TSDB [white papers](http://www.vldb.org/pvldb/vol8/p1816-te Video: [Storing 16 Bytes at Scale](https://youtu.be/b_pEevMAC3I) from [PromCon 2017](https://promcon.io/2017-munich/). -See also the [format documentation](docs/format/README.md). +See also the [format documentation](docs/format/README.md) and [bstream details](docs/bstream.md). A series of blog posts explaining different components of TSDB: * [The Head Block](https://ganeshvernekar.com/blog/prometheus-tsdb-the-head-block/) diff --git a/tsdb/chunkenc/bstream.go b/tsdb/chunkenc/bstream.go index c8efeab1d7..833c9794b6 100644 --- a/tsdb/chunkenc/bstream.go +++ b/tsdb/chunkenc/bstream.go @@ -48,8 +48,8 @@ import ( // bstream is a stream of bits. type bstream struct { - stream []byte // the data stream - count uint8 // how many bits are valid in current byte + stream []byte // The data stream. + count uint8 // How many right-most bits are available for writing in the current byte (the last byte of the stream). } func (b *bstream) bytes() []byte { @@ -86,14 +86,17 @@ func (b *bstream) writeByte(byt byte) { i := len(b.stream) - 1 - // fill up b.b with b.count bits from byt + // Complete the last byte with the leftmost b.count bits from byt. b.stream[i] |= byt >> (8 - b.count) b.stream = append(b.stream, 0) i++ + // Write the remainder, if any. b.stream[i] = byt << b.count } +// writeBits writes the nbits right-most bits of u to the stream +// in left-to-right order. func (b *bstream) writeBits(u uint64, nbits int) { u <<= 64 - uint(nbits) for nbits >= 8 { @@ -115,7 +118,7 @@ type bstreamReader struct { streamOffset int // The offset from which read the next byte from the stream. buffer uint64 // The current buffer, filled from the stream, containing up to 8 bytes from which read bits. - valid uint8 // The number of bits valid to read (from left) in the current buffer. + valid uint8 // The number of right-most bits valid to read (from left) in the current 8 byte buffer. } func newBReader(b []byte) bstreamReader { @@ -148,6 +151,8 @@ func (b *bstreamReader) readBitFast() (bit, error) { return (b.buffer & bitmask) != 0, nil } +// readBits constructs a uint64 with the nbits right-most bits +// read from the stream, and any other bits 0. func (b *bstreamReader) readBits(nbits uint8) (uint64, error) { if b.valid == 0 { if !b.loadNextBuffer(nbits) { diff --git a/tsdb/chunkenc/xor.go b/tsdb/chunkenc/xor.go index 4eabd5a953..ba00a6e811 100644 --- a/tsdb/chunkenc/xor.go +++ b/tsdb/chunkenc/xor.go @@ -200,6 +200,8 @@ func (a *xorAppender) Append(t int64, v float64) { a.tDelta = tDelta } +// bitRange returns whether the given integer can be represented by nbits. +// See docs/bstream.md. 
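+// For example, with nbits=3 the representable range is [-3, 4]:
+// bitRange(4, 3) and bitRange(-3, 3) are true, while bitRange(5, 3) and bitRange(-4, 3) are false.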
func bitRange(x int64, nbits uint8) bool { return -((1<<(nbits-1))-1) <= x && x <= 1<<(nbits-1) } @@ -372,9 +374,11 @@ func (it *xorIterator) Next() bool { it.err = err return false } + + // Account for negative numbers, which come back as high unsigned numbers. + // See docs/bstream.md. if bits > (1 << (sz - 1)) { - // or something - bits = bits - (1 << sz) + bits -= 1 << sz } dod = int64(bits) } diff --git a/tsdb/docs/bstream.md b/tsdb/docs/bstream.md new file mode 100644 index 0000000000..91dec1b148 --- /dev/null +++ b/tsdb/docs/bstream.md @@ -0,0 +1,62 @@ +# bstream details + +This doc describes details of the bstream (bitstream) and how we use it for encoding and decoding. +This doc is incomplete. For more background, see the Gorilla TSDB [white paper](http://www.vldb.org/pvldb/vol8/p1816-teller.pdf) +or the original [go-tsz](https://github.com/dgryski/go-tsz) implementation, which this code is based on. + +## Delta-of-delta encoding for timestamps + +We need to be able to encode and decode dod's for timestamps, which can be positive, zero, or negative. +Note that int64's are implemented as [2's complement](https://en.wikipedia.org/wiki/Two%27s_complement) + +and look like: + +``` +0111...111 = maxint64 + ... +0000...111 = 7 +0000...110 = 6 +0000...101 = 5 +0000...100 = 4 +0000...011 = 3 +0000...010 = 2 +0000...001 = 1 +0000...000 = 0 +1111...111 = -1 +1111...110 = -2 +1111...101 = -3 +1111...100 = -4 +1111...011 = -5 +1111...010 = -6 +1111...001 = -7 +1111...000 = -8 + ... +1000...001 = minint64+1 +1000...000 = minint64 +``` + +All numbers have a prefix (of zeroes for positive numbers, of ones for negative numbers), followed by a number of significant digits at the end. +In all cases, the smaller the absolute value of the number, the fewer the amount of significant digits. + +To encode these numbers, we use: +* A prefix which declares the amount of bits that follow (we use a predefined list of options in order of increasing number of significant bits). +* A number of bits which is one more than the number of significant bits. The extra bit is needed because we deal with unsigned integers, although + it isn't exactly a sign bit. (See below for details). + +The `bitRange` function determines whether a given integer can be represented by a number of bits. +For a given number of bits `nbits` we can distinguish (and thus encode) any set of `2^nbits` numbers. +E.g. for `nbits = 3`, we can encode 8 distinct numbers, and we have a choice of choosing our boundaries. For example -4 to 3, +-3 to 4, 0 to 7 or even -2 to 5 (always inclusive). (Observe in the list above that this is always true.) +Because we need to support positive and negative numbers equally, we choose boundaries that grow symmetrically. Following the same example, +we choose -3 to 4. + +When decoding the number, the most interesting part is how to recognize whether a number is negative or positive, and thus which prefix to set. +Note that the bstream library doesn't interpret integers to a specific type, but rather returns them as uint64's (which are really just a container for 64 bits). +Within the ranges we choose, if looked at as unsigned integers, the higher portion of the range represent the negative numbers. +Continuing the same example, the numbers 001, 010, 011 and 100 are returned as unsigned integers 1,2,3,4 and mean the same thing when casted to int64's. 
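To make the sign recovery concrete, here is a minimal Go sketch of the decode step (names are hypothetical; the real logic lives in `xorIterator.Next` above):

```go
// decodeBits interprets the nbits right-most bits of u as a signed value
// in the symmetric range [-(2^(nbits-1) - 1), 2^(nbits-1)].
func decodeBits(u uint64, nbits uint8) int64 {
	if u > (1 << (nbits - 1)) {
		u -= 1 << nbits // wraps around; the cast below yields the negative value
	}
	return int64(u)
}

// decodeBits(0b011, 3) == 3, decodeBits(0b100, 3) == 4, decodeBits(0b101, 3) == -3
```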
+But the others, 101, 110 and 111 are returned as unsigned integers 5,6,7 but actually represent -3, -2 and -1 (see list above), +The cutoff value is the value set by the `nbit`'th bit, and needs a value subtracted that is represented by the `nbit+1`th bit. +In our example, the 3rd bit sets the number 4, and the 4th sets the number 8. So if we see an unsigned integer exceeding 4 (5,6,7) we subtract 8. This gives us our desired values (-3, -2 and -1). + +Careful observers may note that, if we shift our boundaries down by one, the first bit would always indicate the sign (and imply the needed prefix). +In our example of `nbits = 3`, that would mean the range from -4 to 3. But what we have now works just fine too. From 0c07663b7026f826173f2cfb1dcb42830bbd14bd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Furkan=20T=C3=BCrkal?= Date: Mon, 25 Oct 2021 16:14:40 +0300 Subject: [PATCH 029/161] fix: possible race on shared variables in test (#9470) Fixes #9433 Signed-off-by: Furkan --- tsdb/db_test.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tsdb/db_test.go b/tsdb/db_test.go index 26ead3fc38..88342c8f8d 100644 --- a/tsdb/db_test.go +++ b/tsdb/db_test.go @@ -663,6 +663,7 @@ func TestDB_SnapshotWithDelete(t *testing.T) { numSamples := int64(10) db := openTestDB(t, nil, nil) + defer func() { require.NoError(t, db.Close()) }() ctx := context.Background() app := db.Appender(ctx) @@ -700,15 +701,14 @@ Outer: require.NoError(t, os.RemoveAll(snap)) }() require.NoError(t, db.Snapshot(snap, true)) - require.NoError(t, db.Close()) // reopen DB from snapshot - db, err = Open(snap, nil, nil, nil, nil) + newDB, err := Open(snap, nil, nil, nil, nil) require.NoError(t, err) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, newDB.Close()) }() // Compare the result. - q, err := db.Querier(context.TODO(), 0, numSamples) + q, err := newDB.Querier(context.TODO(), 0, numSamples) require.NoError(t, err) defer func() { require.NoError(t, q.Close()) }() From 25d06a9b76b9761136578e1571e5c7d45a03ca1a Mon Sep 17 00:00:00 2001 From: Julien Pivotto Date: Mon, 25 Oct 2021 15:45:54 +0200 Subject: [PATCH 030/161] Release 2.31.0-rc.1 (#9587) Signed-off-by: Julien Pivotto --- CHANGELOG.md | 4 ++++ VERSION | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index c79ecd8a17..38b03ceb10 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,7 @@ +## 2.31.0-rc.1 / 2021-10-25 + +* [BUGFIX] UI: Fix assets loading when a prefix is used in `--web.route-prefix` or `--web.external-url`. #9586 + ## 2.31.0-rc.0 / 2021-10-21 * [FEATURE] PromQL: Add trigonometric functions and `atan2` binary operator. #9239 #9248 #9515 diff --git a/VERSION b/VERSION index 1fe977a4da..4e9a8e7368 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.31.0-rc.0 +2.31.0-rc.1 From c2d1c858577cdac009162d095ec80402b049949c Mon Sep 17 00:00:00 2001 From: Xiaochao Dong Date: Tue, 26 Oct 2021 14:06:25 +0800 Subject: [PATCH 031/161] close tsdb.head in test case (#9580) Signed-off-by: Xiaochao Dong (@damnever) --- tsdb/head_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tsdb/head_test.go b/tsdb/head_test.go index 6043927eac..1018476402 100644 --- a/tsdb/head_test.go +++ b/tsdb/head_test.go @@ -1397,7 +1397,7 @@ func TestWalRepair_DecodingError(t *testing.T) { err = errors.Cause(initErr) // So that we can pick up errors even if wrapped. 
_, corrErr := err.(*wal.CorruptionErr) require.True(t, corrErr, "reading the wal didn't return corruption error") - require.NoError(t, w.Close()) + require.NoError(t, h.Close()) // Head will close the wal as well. } // Open the db to trigger a repair. From 69e309d202087fb49a04d3da1e6ba3992486ec00 Mon Sep 17 00:00:00 2001 From: DrAuYueng Date: Thu, 28 Oct 2021 08:01:28 +0800 Subject: [PATCH 032/161] Expose TargetsFromGroup/AlertmanagerFromGroup func and reuse this for (#9343) static/file sd config check in promtool Signed-off-by: DrAuYueng --- cmd/promtool/main.go | 84 +++++++++++++++++-- cmd/promtool/main_test.go | 41 ++++++++- ...rt_targetgroup_with_relabel_config.bad.yml | 8 ++ ...t_targetgroup_with_relabel_config.good.yml | 10 +++ ...pe_targetgroup_with_relabel_config.bad.yml | 8 ++ ...e_targetgroup_with_relabel_config.good.yml | 10 +++ notifier/notifier.go | 8 +- notifier/notifier_test.go | 2 +- scrape/scrape.go | 2 +- scrape/target.go | 4 +- scrape/target_test.go | 2 +- 11 files changed, 160 insertions(+), 19 deletions(-) create mode 100644 cmd/promtool/testdata/url_in_alert_targetgroup_with_relabel_config.bad.yml create mode 100644 cmd/promtool/testdata/url_in_alert_targetgroup_with_relabel_config.good.yml create mode 100644 cmd/promtool/testdata/url_in_scrape_targetgroup_with_relabel_config.bad.yml create mode 100644 cmd/promtool/testdata/url_in_scrape_targetgroup_with_relabel_config.good.yml diff --git a/cmd/promtool/main.go b/cmd/promtool/main.go index 22e7d16ff8..9b436ff712 100644 --- a/cmd/promtool/main.go +++ b/cmd/promtool/main.go @@ -44,13 +44,16 @@ import ( yaml "gopkg.in/yaml.v2" "github.com/prometheus/prometheus/config" + "github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/discovery/file" _ "github.com/prometheus/prometheus/discovery/install" // Register service discovery implementations. 
"github.com/prometheus/prometheus/discovery/kubernetes" "github.com/prometheus/prometheus/discovery/targetgroup" + "github.com/prometheus/prometheus/notifier" "github.com/prometheus/prometheus/pkg/labels" "github.com/prometheus/prometheus/pkg/rulefmt" "github.com/prometheus/prometheus/promql" + "github.com/prometheus/prometheus/scrape" ) func main() { @@ -363,19 +366,60 @@ func checkConfig(filename string) ([]string, error) { } if len(files) != 0 { for _, f := range files { - err = checkSDFile(f) + var targetGroups []*targetgroup.Group + targetGroups, err = checkSDFile(f) if err != nil { return nil, errors.Errorf("checking SD file %q: %v", file, err) } + if err := checkTargetGroupsForScrapeConfig(targetGroups, scfg); err != nil { + return nil, err + } } continue } fmt.Printf(" WARNING: file %q for file_sd in scrape job %q does not exist\n", file, scfg.JobName) } + case discovery.StaticConfig: + if err := checkTargetGroupsForScrapeConfig(c, scfg); err != nil { + return nil, err + } } } } + alertConfig := cfg.AlertingConfig + for _, amcfg := range alertConfig.AlertmanagerConfigs { + for _, c := range amcfg.ServiceDiscoveryConfigs { + switch c := c.(type) { + case *file.SDConfig: + for _, file := range c.Files { + files, err := filepath.Glob(file) + if err != nil { + return nil, err + } + if len(files) != 0 { + for _, f := range files { + var targetGroups []*targetgroup.Group + targetGroups, err = checkSDFile(f) + if err != nil { + return nil, errors.Errorf("checking SD file %q: %v", file, err) + } + + if err := checkTargetGroupsForAlertmanager(targetGroups, amcfg); err != nil { + return nil, err + } + } + continue + } + fmt.Printf(" WARNING: file %q for file_sd in alertmanager config does not exist\n", file) + } + case discovery.StaticConfig: + if err := checkTargetGroupsForAlertmanager(c, amcfg); err != nil { + return nil, err + } + } + } + } return ruleFiles, nil } @@ -397,16 +441,16 @@ func checkTLSConfig(tlsConfig config_util.TLSConfig) error { return nil } -func checkSDFile(filename string) error { +func checkSDFile(filename string) ([]*targetgroup.Group, error) { fd, err := os.Open(filename) if err != nil { - return err + return nil, err } defer fd.Close() content, err := ioutil.ReadAll(fd) if err != nil { - return err + return nil, err } var targetGroups []*targetgroup.Group @@ -414,23 +458,23 @@ func checkSDFile(filename string) error { switch ext := filepath.Ext(filename); strings.ToLower(ext) { case ".json": if err := json.Unmarshal(content, &targetGroups); err != nil { - return err + return nil, err } case ".yml", ".yaml": if err := yaml.UnmarshalStrict(content, &targetGroups); err != nil { - return err + return nil, err } default: - return errors.Errorf("invalid file extension: %q", ext) + return nil, errors.Errorf("invalid file extension: %q", ext) } for i, tg := range targetGroups { if tg == nil { - return errors.Errorf("nil target group item found (index %d)", i) + return nil, errors.Errorf("nil target group item found (index %d)", i) } } - return nil + return targetGroups, nil } // CheckRules validates rule files. 
@@ -981,3 +1025,25 @@ func importRules(url *url.URL, start, end, outputDir string, evalInterval time.D return nil } + +func checkTargetGroupsForAlertmanager(targetGroups []*targetgroup.Group, amcfg *config.AlertmanagerConfig) error { + for _, tg := range targetGroups { + if _, _, err := notifier.AlertmanagerFromGroup(tg, amcfg); err != nil { + return err + } + } + + return nil +} + +func checkTargetGroupsForScrapeConfig(targetGroups []*targetgroup.Group, scfg *config.ScrapeConfig) error { + for _, tg := range targetGroups { + _, failures := scrape.TargetsFromGroup(tg, scfg) + if len(failures) > 0 { + first := failures[0] + return first + } + } + + return nil +} diff --git a/cmd/promtool/main_test.go b/cmd/promtool/main_test.go index 87ac13e76c..8ca078cc57 100644 --- a/cmd/promtool/main_test.go +++ b/cmd/promtool/main_test.go @@ -111,7 +111,7 @@ func TestCheckSDFile(t *testing.T) { } for _, test := range cases { t.Run(test.name, func(t *testing.T) { - err := checkSDFile(test.file) + _, err := checkSDFile(test.file) if test.err != "" { require.Equalf(t, test.err, err.Error(), "Expected error %q, got %q", test.err, err.Error()) return @@ -163,3 +163,42 @@ func BenchmarkCheckDuplicates(b *testing.B) { checkDuplicates(rgs.Groups) } } + +func TestCheckTargetConfig(t *testing.T) { + cases := []struct { + name string + file string + err string + }{ + { + name: "url_in_scrape_targetgroup_with_relabel_config.good", + file: "url_in_scrape_targetgroup_with_relabel_config.good.yml", + err: "", + }, + { + name: "url_in_alert_targetgroup_with_relabel_config.good", + file: "url_in_alert_targetgroup_with_relabel_config.good.yml", + err: "", + }, + { + name: "url_in_scrape_targetgroup_with_relabel_config.bad", + file: "url_in_scrape_targetgroup_with_relabel_config.bad.yml", + err: "instance 0 in group 0: \"http://bad\" is not a valid hostname", + }, + { + name: "url_in_alert_targetgroup_with_relabel_config.bad", + file: "url_in_alert_targetgroup_with_relabel_config.bad.yml", + err: "\"http://bad\" is not a valid hostname", + }, + } + for _, test := range cases { + t.Run(test.name, func(t *testing.T) { + _, err := checkConfig("testdata/" + test.file) + if test.err != "" { + require.Equalf(t, test.err, err.Error(), "Expected error %q, got %q", test.err, err.Error()) + return + } + require.NoError(t, err) + }) + } +} diff --git a/cmd/promtool/testdata/url_in_alert_targetgroup_with_relabel_config.bad.yml b/cmd/promtool/testdata/url_in_alert_targetgroup_with_relabel_config.bad.yml new file mode 100644 index 0000000000..a23628e187 --- /dev/null +++ b/cmd/promtool/testdata/url_in_alert_targetgroup_with_relabel_config.bad.yml @@ -0,0 +1,8 @@ +alerting: + alertmanagers: + - relabel_configs: + - source_labels: [__address__] + target_label: __param_target + static_configs: + - targets: + - http://bad diff --git a/cmd/promtool/testdata/url_in_alert_targetgroup_with_relabel_config.good.yml b/cmd/promtool/testdata/url_in_alert_targetgroup_with_relabel_config.good.yml new file mode 100644 index 0000000000..575fe5f11b --- /dev/null +++ b/cmd/promtool/testdata/url_in_alert_targetgroup_with_relabel_config.good.yml @@ -0,0 +1,10 @@ +alerting: + alertmanagers: + - relabel_configs: + - source_labels: [__address__] + target_label: __param_target + - target_label: __address__ + replacement: good + static_configs: + - targets: + - http://bad diff --git a/cmd/promtool/testdata/url_in_scrape_targetgroup_with_relabel_config.bad.yml b/cmd/promtool/testdata/url_in_scrape_targetgroup_with_relabel_config.bad.yml new file mode 100644 
index 0000000000..d5daf6e9b1 --- /dev/null +++ b/cmd/promtool/testdata/url_in_scrape_targetgroup_with_relabel_config.bad.yml @@ -0,0 +1,8 @@ +scrape_configs: + - job_name: prometheus + relabel_configs: + - source_labels: [__address__] + target_label: __param_target + static_configs: + - targets: + - http://bad diff --git a/cmd/promtool/testdata/url_in_scrape_targetgroup_with_relabel_config.good.yml b/cmd/promtool/testdata/url_in_scrape_targetgroup_with_relabel_config.good.yml new file mode 100644 index 0000000000..ab26f00ed6 --- /dev/null +++ b/cmd/promtool/testdata/url_in_scrape_targetgroup_with_relabel_config.good.yml @@ -0,0 +1,10 @@ +scrape_configs: + - job_name: prometheus + relabel_configs: + - source_labels: [__address__] + target_label: __param_target + - target_label: __address__ + replacement: good + static_configs: + - targets: + - http://good diff --git a/notifier/notifier.go b/notifier/notifier.go index 97086d5629..19a7f0bf9a 100644 --- a/notifier/notifier.go +++ b/notifier/notifier.go @@ -602,7 +602,7 @@ func (n *Manager) Stop() { n.cancel() } -// alertmanager holds Alertmanager endpoint information. +// Alertmanager holds Alertmanager endpoint information. type alertmanager interface { url() *url.URL } @@ -654,7 +654,7 @@ func (s *alertmanagerSet) sync(tgs []*targetgroup.Group) { allDroppedAms := []alertmanager{} for _, tg := range tgs { - ams, droppedAms, err := alertmanagerFromGroup(tg, s.cfg) + ams, droppedAms, err := AlertmanagerFromGroup(tg, s.cfg) if err != nil { level.Error(s.logger).Log("msg", "Creating discovered Alertmanagers failed", "err", err) continue @@ -691,9 +691,9 @@ func postPath(pre string, v config.AlertmanagerAPIVersion) string { return path.Join("/", pre, alertPushEndpoint) } -// alertmanagerFromGroup extracts a list of alertmanagers from a target group +// AlertmanagerFromGroup extracts a list of alertmanagers from a target group // and an associated AlertmanagerConfig. 
-func alertmanagerFromGroup(tg *targetgroup.Group, cfg *config.AlertmanagerConfig) ([]alertmanager, []alertmanager, error) { +func AlertmanagerFromGroup(tg *targetgroup.Group, cfg *config.AlertmanagerConfig) ([]alertmanager, []alertmanager, error) { var res []alertmanager var droppedAlertManagers []alertmanager diff --git a/notifier/notifier_test.go b/notifier/notifier_test.go index e4a7f26cdc..b04b07e947 100644 --- a/notifier/notifier_test.go +++ b/notifier/notifier_test.go @@ -447,7 +447,7 @@ func (a alertmanagerMock) url() *url.URL { func TestLabelSetNotReused(t *testing.T) { tg := makeInputTargetGroup() - _, _, err := alertmanagerFromGroup(tg, &config.AlertmanagerConfig{}) + _, _, err := AlertmanagerFromGroup(tg, &config.AlertmanagerConfig{}) require.NoError(t, err) diff --git a/scrape/scrape.go b/scrape/scrape.go index e981ad36a3..590133cde8 100644 --- a/scrape/scrape.go +++ b/scrape/scrape.go @@ -472,7 +472,7 @@ func (sp *scrapePool) Sync(tgs []*targetgroup.Group) { var all []*Target sp.droppedTargets = []*Target{} for _, tg := range tgs { - targets, failures := targetsFromGroup(tg, sp.config) + targets, failures := TargetsFromGroup(tg, sp.config) for _, err := range failures { level.Error(sp.logger).Log("msg", "Creating target failed", "err", err) } diff --git a/scrape/target.go b/scrape/target.go index 35d691751d..c791a8a22d 100644 --- a/scrape/target.go +++ b/scrape/target.go @@ -469,8 +469,8 @@ func populateLabels(lset labels.Labels, cfg *config.ScrapeConfig) (res, orig lab return res, preRelabelLabels, nil } -// targetsFromGroup builds targets based on the given TargetGroup and config. -func targetsFromGroup(tg *targetgroup.Group, cfg *config.ScrapeConfig) ([]*Target, []error) { +// TargetsFromGroup builds targets based on the given TargetGroup and config. +func TargetsFromGroup(tg *targetgroup.Group, cfg *config.ScrapeConfig) ([]*Target, []error) { targets := make([]*Target, 0, len(tg.Targets)) failures := []error{} diff --git a/scrape/target_test.go b/scrape/target_test.go index 542326d7c4..fa03c068ea 100644 --- a/scrape/target_test.go +++ b/scrape/target_test.go @@ -371,7 +371,7 @@ func TestNewClientWithBadTLSConfig(t *testing.T) { func TestTargetsFromGroup(t *testing.T) { expectedError := "instance 0 in group : no address" - targets, failures := targetsFromGroup(&targetgroup.Group{Targets: []model.LabelSet{{}, {model.AddressLabel: "localhost:9090"}}}, &config.ScrapeConfig{}) + targets, failures := TargetsFromGroup(&targetgroup.Group{Targets: []model.LabelSet{{}, {model.AddressLabel: "localhost:9090"}}}, &config.ScrapeConfig{}) if len(targets) != 1 { t.Fatalf("Expected 1 target, got %v", len(targets)) } From c91c2bbea5e96c7a7101e4914736b854d2f1d313 Mon Sep 17 00:00:00 2001 From: David Leadbeater Date: Thu, 28 Oct 2021 22:17:18 +1100 Subject: [PATCH 033/161] promtool: Show more human readable got/exp output (#8064) Avoid using %#v, nothing needs to parse this, so escaping " and so on leads to hard to read output. Add new lines, number and indentation to each alert series output. 
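For illustration (alert name and values invented), a failing alert test now
prints the expected and got alerts along these lines:

     alertname: InstanceDown, time: 5m,
      exp:[
         0:
           Labels:{alertname="InstanceDown", severity="page"}
           Annotations:{summary="instance down"}
      ],
      got:[]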
Signed-off-by: David Leadbeater --- cmd/promtool/unittest.go | 62 ++++++++++++++++++++++------------------ 1 file changed, 34 insertions(+), 28 deletions(-) diff --git a/cmd/promtool/unittest.go b/cmd/promtool/unittest.go index 3ddec31522..5dcc5bb56c 100644 --- a/cmd/promtool/unittest.go +++ b/cmd/promtool/unittest.go @@ -47,6 +47,7 @@ func RulesUnitTest(queryOpts promql.LazyLoaderOpts, files ...string) int { fmt.Fprintln(os.Stderr, " FAILED:") for _, e := range errs { fmt.Fprintln(os.Stderr, e.Error()) + fmt.Println() } failed = true } else { @@ -313,30 +314,18 @@ func (tg *testGroup) test(evalInterval time.Duration, groupOrderMap map[string]i }) } - var sb strings.Builder - if gotAlerts.Len() != expAlerts.Len() { + sort.Sort(gotAlerts) + sort.Sort(expAlerts) + + if !reflect.DeepEqual(expAlerts, gotAlerts) { + var testName string if tg.TestGroupName != "" { - fmt.Fprintf(&sb, " name: %s,\n", tg.TestGroupName) - } - fmt.Fprintf(&sb, " alertname:%s, time:%s, \n", testcase.Alertname, testcase.EvalTime.String()) - fmt.Fprintf(&sb, " exp:%#v, \n", expAlerts.String()) - fmt.Fprintf(&sb, " got:%#v", gotAlerts.String()) - - errs = append(errs, errors.New(sb.String())) - } else { - sort.Sort(gotAlerts) - sort.Sort(expAlerts) - - if !reflect.DeepEqual(expAlerts, gotAlerts) { - if tg.TestGroupName != "" { - fmt.Fprintf(&sb, " name: %s,\n", tg.TestGroupName) - } - fmt.Fprintf(&sb, " alertname:%s, time:%s, \n", testcase.Alertname, testcase.EvalTime.String()) - fmt.Fprintf(&sb, " exp:%#v, \n", expAlerts.String()) - fmt.Fprintf(&sb, " got:%#v", gotAlerts.String()) - - errs = append(errs, errors.New(sb.String())) + testName = fmt.Sprintf(" name: %s,\n", tg.TestGroupName) } + expString := indentLines(expAlerts.String(), " ") + gotString := indentLines(gotAlerts.String(), " ") + errs = append(errs, errors.Errorf("%s alertname: %s, time: %s, \n exp:%v, \n got:%v", + testName, testcase.Alertname, testcase.EvalTime.String(), expString, gotString)) } } @@ -385,7 +374,7 @@ Outer: return labels.Compare(gotSamples[i].Labels, gotSamples[j].Labels) <= 0 }) if !reflect.DeepEqual(expSamples, gotSamples) { - errs = append(errs, errors.Errorf(" expr: %q, time: %s,\n exp:%#v\n got:%#v", testCase.Expr, + errs = append(errs, errors.Errorf(" expr: %q, time: %s,\n exp: %v\n got: %v", testCase.Expr, testCase.EvalTime.String(), parsedSamplesString(expSamples), parsedSamplesString(gotSamples))) } } @@ -468,6 +457,23 @@ func query(ctx context.Context, qs string, t time.Time, engine *promql.Engine, q } } +// indentLines prefixes each line in the supplied string with the given "indent" +// string. 
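+// The first line is left unindented, so the result can continue an existing
+// line of output.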
+func indentLines(lines, indent string) string { + sb := strings.Builder{} + n := strings.Split(lines, "\n") + for i, l := range n { + if i > 0 { + sb.WriteString(indent) + } + sb.WriteString(l) + if i != len(n)-1 { + sb.WriteRune('\n') + } + } + return sb.String() +} + type labelsAndAnnotations []labelAndAnnotation func (la labelsAndAnnotations) Len() int { return len(la) } @@ -484,11 +490,11 @@ func (la labelsAndAnnotations) String() string { if len(la) == 0 { return "[]" } - s := "[" + la[0].String() - for _, l := range la[1:] { - s += ", " + l.String() + s := "[\n0:" + indentLines("\n"+la[0].String(), " ") + for i, l := range la[1:] { + s += ",\n" + fmt.Sprintf("%d", i+1) + ":" + indentLines("\n"+l.String(), " ") } - s += "]" + s += "\n]" return s } @@ -499,7 +505,7 @@ type labelAndAnnotation struct { } func (la *labelAndAnnotation) String() string { - return "Labels:" + la.Labels.String() + " Annotations:" + la.Annotations.String() + return "Labels:" + la.Labels.String() + "\nAnnotations:" + la.Annotations.String() } type series struct { From bc72a718c4e537f8cff37e638320708de1788041 Mon Sep 17 00:00:00 2001 From: Robert Fratto Date: Fri, 29 Oct 2021 11:25:05 -0400 Subject: [PATCH 034/161] Initial draft of prometheus-agent (#8785) * Initial draft of prometheus-agent This commit introduces a new binary, prometheus-agent, based on the Grafana Agent code. It runs a WAL-only version of prometheus without the TSDB, alerting, or rule evaluations. It is intended to be used to remote_write to Prometheus or another remote_write receiver. By default, prometheus-agent will listen on port 9095 to not collide with the prometheus default of 9090. Truncation of the WAL cooperates on a best-effort case with Remote Write. Every time the WAL is truncated, the minimum timestamp of data to truncate is determined by the lowest sent timestamp of all samples across all remote_write endpoints. This gives loose guarantees that data from the WAL will not try to be removed until the maximum sample lifetime passes or remote_write starts functionining. 
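Concretely, each truncation pass picks its cutoff roughly like this (a
condensed sketch of the run loop added in tsdb/agent/db.go, not extra code):

    ts := db.rs.LowestSentTimestamp() - db.opts.MinWALTime
    if ts < 0 {
        ts = 0
    }
    // Cap how old retained data may get so the WAL cannot grow forever
    // while remote_write is down.
    if maxTS := timestamp.FromTime(time.Now()) - db.opts.MaxWALTime; ts < maxTS {
        ts = maxTS
    }
    db.truncate(ts)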
Signed-off-by: Robert Fratto * add tests for Prometheus agent (#22) * add tests for Prometheus agent * add tests for Prometheus agent * rearranged tests as per the review comments * update tests for Agent * changes as per code review comments Signed-off-by: SriKrishna Paparaju * incremental changes to prometheus agent Signed-off-by: SriKrishna Paparaju * changes as per code review comments Signed-off-by: SriKrishna Paparaju * Commit feedback from code review Co-authored-by: Bartlomiej Plotka Co-authored-by: Ganesh Vernekar Signed-off-by: Robert Fratto * Port over some comments from grafana/agent Signed-off-by: Robert Fratto * Rename agent.Storage to agent.DB for tsdb consistency Signed-off-by: Robert Fratto * Consolidate agentMode ifs in cmd/prometheus/main.go Signed-off-by: Robert Fratto * Document PreAction usage requirements better for agent mode flags Signed-off-by: Robert Fratto * remove unnecessary defaultListenAddr Signed-off-by: Robert Fratto * `go fmt ./tsdb/agent` and fix lint errors Signed-off-by: Robert Fratto Co-authored-by: SriKrishna Paparaju --- .gitignore | 2 + cmd/prometheus/main.go | 296 ++++++++++++-- cmd/prometheus/main_test.go | 20 + storage/remote/storage.go | 5 + storage/remote/write.go | 21 + tsdb/agent/db.go | 761 ++++++++++++++++++++++++++++++++++++ tsdb/agent/db_test.go | 449 +++++++++++++++++++++ tsdb/agent/series.go | 177 +++++++++ web/api/v1/api.go | 56 +-- web/web.go | 2 + web/web_test.go | 62 +++ 11 files changed, 1802 insertions(+), 49 deletions(-) create mode 100644 tsdb/agent/db.go create mode 100644 tsdb/agent/db_test.go create mode 100644 tsdb/agent/series.go diff --git a/.gitignore b/.gitignore index b382f6c0ec..35564c059c 100644 --- a/.gitignore +++ b/.gitignore @@ -8,7 +8,9 @@ /promtool benchmark.txt /data +/data-agent /cmd/prometheus/data +/cmd/prometheus/data-agent /cmd/prometheus/debug /benchout /cmd/promtool/data diff --git a/cmd/prometheus/main.go b/cmd/prometheus/main.go index d50cd3e7f3..e69e9f2dbd 100644 --- a/cmd/prometheus/main.go +++ b/cmd/prometheus/main.go @@ -72,11 +72,14 @@ import ( "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/storage/remote" "github.com/prometheus/prometheus/tsdb" + "github.com/prometheus/prometheus/tsdb/agent" "github.com/prometheus/prometheus/util/strutil" "github.com/prometheus/prometheus/web" ) var ( + appName = "prometheus" + configSuccess = prometheus.NewGauge(prometheus.GaugeOpts{ Name: "prometheus_config_last_reload_successful", Help: "Whether the last configuration reload attempt was successful.", @@ -88,10 +91,13 @@ var ( defaultRetentionString = "15d" defaultRetentionDuration model.Duration + + agentMode bool + agentOnlyFlags, serverOnlyFlags []string ) func init() { - prometheus.MustRegister(version.NewCollector("prometheus")) + prometheus.MustRegister(version.NewCollector(strings.ReplaceAll(appName, "-", "_"))) var err error defaultRetentionDuration, err = model.ParseDuration(defaultRetentionString) @@ -100,6 +106,37 @@ func init() { } } +// agentOnlySetting can be provided to a kingpin flag's PreAction to mark a +// flag as agent-only. +func agentOnlySetting() func(*kingpin.ParseContext) error { + return func(pc *kingpin.ParseContext) error { + agentOnlyFlags = append(agentOnlyFlags, extractFlagName(pc)) + return nil + } +} + +// serverOnlySetting can be provided to a kingpin flag's PreAction to mark a +// flag as server-only. 
+func serverOnlySetting() func(*kingpin.ParseContext) error { + return func(pc *kingpin.ParseContext) error { + serverOnlyFlags = append(serverOnlyFlags, extractFlagName(pc)) + return nil + } +} + +// extractFlagName gets the flag name from the ParseContext. Only call +// from agentOnlySetting or serverOnlySetting. +func extractFlagName(pc *kingpin.ParseContext) string { + for _, pe := range pc.Elements { + fc, ok := pe.Clause.(*kingpin.FlagClause) + if !ok { + continue + } + return fc.Model().Name + } + panic("extractFlagName not called from a kingpin PreAction. This is a bug, please report to Prometheus.") +} + type flagConfig struct { configFile string @@ -111,6 +148,7 @@ type flagConfig struct { web web.Options scrape scrape.Options tsdb tsdbOptions + agent agentOptions lookbackDelta model.Duration webTimeout model.Duration queryTimeout model.Duration @@ -196,10 +234,12 @@ func main() { a := kingpin.New(filepath.Base(os.Args[0]), "The Prometheus monitoring server").UsageWriter(os.Stdout) - a.Version(version.Print("prometheus")) + a.Version(version.Print(appName)) a.HelpFlag.Short('h') + a.Flag("agent", "Agent mode.").BoolVar(&agentMode) + a.Flag("config.file", "Prometheus configuration file path."). Default("prometheus.yml").StringVar(&cfg.configFile) @@ -245,60 +285,105 @@ func main() { Default(".*").StringVar(&cfg.corsRegexString) a.Flag("storage.tsdb.path", "Base path for metrics storage."). + PreAction(serverOnlySetting()). Default("data/").StringVar(&cfg.localStoragePath) a.Flag("storage.tsdb.min-block-duration", "Minimum duration of a data block before being persisted. For use in testing."). + PreAction(serverOnlySetting()). Hidden().Default("2h").SetValue(&cfg.tsdb.MinBlockDuration) a.Flag("storage.tsdb.max-block-duration", "Maximum duration compacted blocks may span. For use in testing. (Defaults to 10% of the retention period.)"). + PreAction(serverOnlySetting()). Hidden().PlaceHolder("").SetValue(&cfg.tsdb.MaxBlockDuration) a.Flag("storage.tsdb.max-block-chunk-segment-size", "The maximum size for a single chunk segment in a block. Example: 512MB"). + PreAction(serverOnlySetting()). Hidden().PlaceHolder("").BytesVar(&cfg.tsdb.MaxBlockChunkSegmentSize) a.Flag("storage.tsdb.wal-segment-size", "Size at which to split the tsdb WAL segment files. Example: 100MB"). + PreAction(serverOnlySetting()). Hidden().PlaceHolder("").BytesVar(&cfg.tsdb.WALSegmentSize) a.Flag("storage.tsdb.retention", "[DEPRECATED] How long to retain samples in storage. This flag has been deprecated, use \"storage.tsdb.retention.time\" instead."). + PreAction(serverOnlySetting()). SetValue(&oldFlagRetentionDuration) a.Flag("storage.tsdb.retention.time", "How long to retain samples in storage. When this flag is set it overrides \"storage.tsdb.retention\". If neither this flag nor \"storage.tsdb.retention\" nor \"storage.tsdb.retention.size\" is set, the retention time defaults to "+defaultRetentionString+". Units Supported: y, w, d, h, m, s, ms."). + PreAction(serverOnlySetting()). SetValue(&newFlagRetentionDuration) a.Flag("storage.tsdb.retention.size", "Maximum number of bytes that can be stored for blocks. A unit is required, supported units: B, KB, MB, GB, TB, PB, EB. Ex: \"512MB\"."). + PreAction(serverOnlySetting()). BytesVar(&cfg.tsdb.MaxBytes) a.Flag("storage.tsdb.no-lockfile", "Do not create lockfile in data directory."). + PreAction(serverOnlySetting()). 
Default("false").BoolVar(&cfg.tsdb.NoLockfile) a.Flag("storage.tsdb.allow-overlapping-blocks", "Allow overlapping blocks, which in turn enables vertical compaction and vertical query merge."). + PreAction(serverOnlySetting()). Default("false").BoolVar(&cfg.tsdb.AllowOverlappingBlocks) a.Flag("storage.tsdb.wal-compression", "Compress the tsdb WAL."). + PreAction(serverOnlySetting()). Hidden().Default("true").BoolVar(&cfg.tsdb.WALCompression) + a.Flag("storage.agent.path", "Base path for metrics storage."). + PreAction(agentOnlySetting()). + Default("data-agent/").StringVar(&cfg.localStoragePath) + + a.Flag("storage.agent.segment-size", + "Size at which to split WAL segment files. Example: 100MB"). + PreAction(agentOnlySetting()). + Hidden().PlaceHolder("").BytesVar(&cfg.agent.WALSegmentSize) + + a.Flag("storage.agent.compression", "Compress the agent WAL."). + PreAction(agentOnlySetting()). + Default("true").BoolVar(&cfg.agent.WALCompression) + + a.Flag("storage.agent.truncate-frequency", + "The frequency at which to truncate the WAL and remove old data."). + PreAction(agentOnlySetting()). + Hidden().PlaceHolder("").SetValue(&cfg.agent.TruncateFrequency) + + a.Flag("storage.agent.retention.min-time", + "Minimum age samples may be before being considered for deletion when the WAL is truncated"). + PreAction(agentOnlySetting()). + SetValue(&cfg.agent.MinWALTime) + + a.Flag("storage.agent.retention.max-time", + "Maximum age samples may be before being forcibly deleted when the WAL is truncated"). + PreAction(agentOnlySetting()). + SetValue(&cfg.agent.MaxWALTime) + a.Flag("storage.remote.flush-deadline", "How long to wait flushing sample on shutdown or config reload."). Default("1m").PlaceHolder("").SetValue(&cfg.RemoteFlushDeadline) a.Flag("storage.remote.read-sample-limit", "Maximum overall number of samples to return via the remote read interface, in a single query. 0 means no limit. This limit is ignored for streamed response types."). + PreAction(serverOnlySetting()). Default("5e7").IntVar(&cfg.web.RemoteReadSampleLimit) a.Flag("storage.remote.read-concurrent-limit", "Maximum number of concurrent remote read calls. 0 means no limit."). + PreAction(serverOnlySetting()). Default("10").IntVar(&cfg.web.RemoteReadConcurrencyLimit) a.Flag("storage.remote.read-max-bytes-in-frame", "Maximum number of bytes in a single frame for streaming remote read response types before marshalling. Note that client might have limit on frame size as well. 1MB as recommended by protobuf by default."). + PreAction(serverOnlySetting()). Default("1048576").IntVar(&cfg.web.RemoteReadBytesInFrame) a.Flag("rules.alert.for-outage-tolerance", "Max time to tolerate prometheus outage for restoring \"for\" state of alert."). + PreAction(serverOnlySetting()). Default("1h").SetValue(&cfg.outageTolerance) a.Flag("rules.alert.for-grace-period", "Minimum duration between alert and restored \"for\" state. This is maintained only for alerts with configured \"for\" time greater than grace period."). + PreAction(serverOnlySetting()). Default("10m").SetValue(&cfg.forGracePeriod) a.Flag("rules.alert.resend-delay", "Minimum amount of time to wait before resending an alert to Alertmanager."). + PreAction(serverOnlySetting()). Default("1m").SetValue(&cfg.resendDelay) a.Flag("scrape.adjust-timestamps", "Adjust scrape timestamps by up to `scrape.timestamp-tolerance` to align them to the intended schedule. See https://github.com/prometheus/prometheus/issues/7846 for more context. Experimental. 
This flag will be removed in a future release."). @@ -308,21 +393,26 @@ func main() { Hidden().Default("2ms").DurationVar(&scrape.ScrapeTimestampTolerance) a.Flag("alertmanager.notification-queue-capacity", "The capacity of the queue for pending Alertmanager notifications."). + PreAction(serverOnlySetting()). Default("10000").IntVar(&cfg.notifier.QueueCapacity) // TODO: Remove in Prometheus 3.0. alertmanagerTimeout := a.Flag("alertmanager.timeout", "[DEPRECATED] This flag has no effect.").Hidden().String() a.Flag("query.lookback-delta", "The maximum lookback duration for retrieving metrics during expression evaluations and federation."). + PreAction(serverOnlySetting()). Default("5m").SetValue(&cfg.lookbackDelta) a.Flag("query.timeout", "Maximum time a query may take before being aborted."). + PreAction(serverOnlySetting()). Default("2m").SetValue(&cfg.queryTimeout) a.Flag("query.max-concurrency", "Maximum number of queries executed concurrently."). + PreAction(serverOnlySetting()). Default("20").IntVar(&cfg.queryConcurrency) a.Flag("query.max-samples", "Maximum number of samples a single query can load into memory. Note that queries will fail if they try to load more samples than this into memory, so this also limits the number of samples a query can return."). + PreAction(serverOnlySetting()). Default("50000000").IntVar(&cfg.queryMaxSamples) a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-at-modifier, promql-negative-offset, remote-write-receiver, extra-scrape-metrics, new-service-discovery-manager. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details."). @@ -390,7 +480,8 @@ func main() { // RoutePrefix must always be at least '/'. cfg.web.RoutePrefix = "/" + strings.Trim(cfg.web.RoutePrefix, "/") - { // Time retention settings. + if !agentMode { + // Time retention settings. if oldFlagRetentionDuration != 0 { level.Warn(logger).Log("deprecation_notice", "'storage.tsdb.retention' flag is deprecated use 'storage.tsdb.retention.time' instead.") cfg.tsdb.RetentionDuration = oldFlagRetentionDuration @@ -415,9 +506,8 @@ func main() { cfg.tsdb.RetentionDuration = y level.Warn(logger).Log("msg", "Time retention value is too high. Limiting to: "+y.String()) } - } - { // Max block size settings. + // Max block size settings. if cfg.tsdb.MaxBlockDuration == 0 { maxBlockDuration, err := model.ParseDuration("31d") if err != nil { @@ -483,7 +573,12 @@ func main() { var ( scrapeManager = scrape.NewManager(&cfg.scrape, log.With(logger, "component", "scrape manager"), fanoutStorage) - opts = promql.EngineOpts{ + queryEngine *promql.Engine + ruleManager *rules.Manager + ) + + if !agentMode { + opts := promql.EngineOpts{ Logger: log.With(logger, "component", "query engine"), Reg: prometheus.DefaultRegisterer, MaxSamples: cfg.queryMaxSamples, @@ -510,7 +605,7 @@ func main() { ForGracePeriod: time.Duration(cfg.forGracePeriod), ResendDelay: time.Duration(cfg.resendDelay), }) - ) + } scraper.Set(scrapeManager) @@ -526,6 +621,7 @@ func main() { cfg.web.RuleManager = ruleManager cfg.web.Notifier = notifierManager cfg.web.LookbackDelta = time.Duration(cfg.lookbackDelta) + cfg.web.IsAgent = agentMode cfg.web.Version = &web.PrometheusVersion{ Version: version.Version, @@ -572,6 +668,11 @@ func main() { }, { name: "query_engine", reloader: func(cfg *config.Config) error { + if agentMode { + // No-op in Agent mode. 
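+ // The query engine is only constructed when running as a full server, + // so there is nothing to reload here.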
+ return nil + } + if cfg.GlobalConfig.QueryLogFile == "" { queryEngine.SetQueryLogger(nil) return nil @@ -613,6 +714,11 @@ func main() { }, { name: "rules", reloader: func(cfg *config.Config) error { + if agentMode { + // No-op in Agent mode + return nil + } + // Get all rule files matching the configuration paths. var files []string for _, pat := range cfg.RuleFiles { @@ -817,7 +923,7 @@ func main() { }, ) } - { + if !agentMode { // Rule manager. g.Add( func() error { @@ -829,8 +935,7 @@ func main() { ruleManager.Stop() }, ) - } - { + // TSDB. opts := cfg.tsdb.ToTSDBOptions() cancel := make(chan struct{}) @@ -892,6 +997,59 @@ func main() { }, ) } + if agentMode { + // WAL storage. + opts := cfg.agent.ToAgentOptions() + cancel := make(chan struct{}) + g.Add( + func() error { + level.Info(logger).Log("msg", "Starting WAL storage ...") + if cfg.agent.WALSegmentSize != 0 { + if cfg.agent.WALSegmentSize < 10*1024*1024 || cfg.agent.WALSegmentSize > 256*1024*1024 { + return errors.New("flag 'storage.agent.segment-size' must be set between 10MB and 256MB") + } + } + db, err := agent.Open( + logger, + prometheus.DefaultRegisterer, + remoteStorage, + cfg.localStoragePath, + &opts, + ) + if err != nil { + return errors.Wrap(err, "opening storage failed") + } + + switch fsType := prom_runtime.Statfs(cfg.localStoragePath); fsType { + case "NFS_SUPER_MAGIC": + level.Warn(logger).Log("fs_type", fsType, "msg", "This filesystem is not supported and may lead to data corruption and data loss. Please carefully read https://prometheus.io/docs/prometheus/latest/storage/ to learn more about supported filesystems.") + default: + level.Info(logger).Log("fs_type", fsType) + } + + level.Info(logger).Log("msg", "Agent WAL storage started") + level.Debug(logger).Log("msg", "Agent WAL storage options", + "WALSegmentSize", cfg.agent.WALSegmentSize, + "WALCompression", cfg.agent.WALCompression, + "StripeSize", cfg.agent.StripeSize, + "TruncateFrequency", cfg.agent.TruncateFrequency, + "MinWALTime", cfg.agent.MinWALTime, + "MaxWALTime", cfg.agent.MaxWALTime, + ) + + localStorage.Set(db, 0) + close(dbOpen) + <-cancel + return nil + }, + func(e error) { + if err := fanoutStorage.Close(); err != nil { + level.Error(logger).Log("msg", "Error stopping storage", "err", err) + } + close(cancel) + }, + ) + } { // Web handler. g.Add( @@ -1015,6 +1173,25 @@ func reloadConfig(filename string, expandExternalLabels bool, enableExemplarStor } } + // Perform validation for Agent-compatible configs and remove anything that's unsupported. + if agentMode { + // Perform validation for Agent-compatible configs and remove anything that's + // unsupported. + if len(conf.AlertingConfig.AlertRelabelConfigs) > 0 || len(conf.AlertingConfig.AlertmanagerConfigs) > 0 { + level.Warn(logger).Log("msg", "alerting configs not supported in agent mode") + conf.AlertingConfig.AlertRelabelConfigs = []*relabel.Config{} + conf.AlertingConfig.AlertmanagerConfigs = config.AlertmanagerConfigs{} + } + if len(conf.RuleFiles) > 0 { + level.Warn(logger).Log("msg", "recording rules not supported in agent mode") + conf.RuleFiles = []string{} + } + if len(conf.RemoteReadConfigs) > 0 { + level.Warn(logger).Log("msg", "remote_read configs not supported in agent mode") + conf.RemoteReadConfigs = []*config.RemoteReadConfig{} + } + } + failed := false for _, rl := range rls { rstart := time.Now() @@ -1115,18 +1292,21 @@ func sendAlerts(s sender, externalURL string) rules.NotifyFunc { // storage at a later point in time. 
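// It holds either a *tsdb.DB (server mode) or an *agent.DB (agent mode) once // one has been set.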
type readyStorage struct { mtx sync.RWMutex - db *tsdb.DB + db storage.Storage startTimeMargin int64 stats *tsdb.DBStats } func (s *readyStorage) ApplyConfig(conf *config.Config) error { db := s.get() - return db.ApplyConfig(conf) + if db, ok := db.(*tsdb.DB); ok { + return db.ApplyConfig(conf) + } + return nil } // Set the storage. -func (s *readyStorage) Set(db *tsdb.DB, startTimeMargin int64) { +func (s *readyStorage) Set(db storage.Storage, startTimeMargin int64) { s.mtx.Lock() defer s.mtx.Unlock() @@ -1134,7 +1314,7 @@ func (s *readyStorage) Set(db *tsdb.DB, startTimeMargin int64) { s.startTimeMargin = startTimeMargin } -func (s *readyStorage) get() *tsdb.DB { +func (s *readyStorage) get() storage.Storage { s.mtx.RLock() x := s.db s.mtx.RUnlock() @@ -1151,15 +1331,21 @@ func (s *readyStorage) getStats() *tsdb.DBStats { // StartTime implements the Storage interface. func (s *readyStorage) StartTime() (int64, error) { if x := s.get(); x != nil { - var startTime int64 - - if len(x.Blocks()) > 0 { - startTime = x.Blocks()[0].Meta().MinTime - } else { - startTime = time.Now().Unix() * 1000 + switch db := x.(type) { + case *tsdb.DB: + var startTime int64 + if len(db.Blocks()) > 0 { + startTime = db.Blocks()[0].Meta().MinTime + } else { + startTime = time.Now().Unix() * 1000 + } + // Add a safety margin as it may take a few minutes for everything to spin up. + return startTime + s.startTimeMargin, nil + case *agent.DB: + return db.StartTime() + default: + panic(fmt.Sprintf("unkown storage type %T", db)) } - // Add a safety margin as it may take a few minutes for everything to spin up. - return startTime + s.startTimeMargin, nil } return math.MaxInt64, tsdb.ErrNotReady @@ -1183,7 +1369,14 @@ func (s *readyStorage) ChunkQuerier(ctx context.Context, mint, maxt int64) (stor func (s *readyStorage) ExemplarQuerier(ctx context.Context) (storage.ExemplarQuerier, error) { if x := s.get(); x != nil { - return x.ExemplarQuerier(ctx) + switch db := x.(type) { + case *tsdb.DB: + return db.ExemplarQuerier(ctx) + case *agent.DB: + return nil, agent.ErrUnsupported + default: + panic(fmt.Sprintf("unknown storage type %T", db)) + } } return nil, tsdb.ErrNotReady } @@ -1221,7 +1414,14 @@ func (s *readyStorage) Close() error { // CleanTombstones implements the api_v1.TSDBAdminStats and api_v2.TSDBAdmin interfaces. func (s *readyStorage) CleanTombstones() error { if x := s.get(); x != nil { - return x.CleanTombstones() + switch db := x.(type) { + case *tsdb.DB: + return db.CleanTombstones() + case *agent.DB: + return agent.ErrUnsupported + default: + panic(fmt.Sprintf("unknown storage type %T", db)) + } } return tsdb.ErrNotReady } @@ -1229,7 +1429,14 @@ func (s *readyStorage) CleanTombstones() error { // Delete implements the api_v1.TSDBAdminStats and api_v2.TSDBAdmin interfaces. func (s *readyStorage) Delete(mint, maxt int64, ms ...*labels.Matcher) error { if x := s.get(); x != nil { - return x.Delete(mint, maxt, ms...) + switch db := x.(type) { + case *tsdb.DB: + return db.Delete(mint, maxt, ms...) + case *agent.DB: + return agent.ErrUnsupported + default: + panic(fmt.Sprintf("unknown storage type %T", db)) + } } return tsdb.ErrNotReady } @@ -1237,7 +1444,14 @@ func (s *readyStorage) Delete(mint, maxt int64, ms ...*labels.Matcher) error { // Snapshot implements the api_v1.TSDBAdminStats and api_v2.TSDBAdmin interfaces. 
func (s *readyStorage) Snapshot(dir string, withHead bool) error { if x := s.get(); x != nil { - return x.Snapshot(dir, withHead) + switch db := x.(type) { + case *tsdb.DB: + return db.Snapshot(dir, withHead) + case *agent.DB: + return agent.ErrUnsupported + default: + panic(fmt.Sprintf("unknown storage type %T", db)) + } } return tsdb.ErrNotReady } @@ -1245,7 +1459,14 @@ func (s *readyStorage) Snapshot(dir string, withHead bool) error { // Stats implements the api_v1.TSDBAdminStats interface. func (s *readyStorage) Stats(statsByLabelName string) (*tsdb.Stats, error) { if x := s.get(); x != nil { - return x.Head().Stats(statsByLabelName), nil + switch db := x.(type) { + case *tsdb.DB: + return db.Head().Stats(statsByLabelName), nil + case *agent.DB: + return nil, agent.ErrUnsupported + default: + panic(fmt.Sprintf("unknown storage type %T", db)) + } } return nil, tsdb.ErrNotReady } @@ -1323,6 +1544,27 @@ func (opts tsdbOptions) ToTSDBOptions() tsdb.Options { } } +// agentOptions is a version of agent.Options with defined units. This is required +// as agent.Option fields are unit agnostic (time). +type agentOptions struct { + WALSegmentSize units.Base2Bytes + WALCompression bool + StripeSize int + TruncateFrequency model.Duration + MinWALTime, MaxWALTime model.Duration +} + +func (opts agentOptions) ToAgentOptions() agent.Options { + return agent.Options{ + WALSegmentSize: int(opts.WALSegmentSize), + WALCompression: opts.WALCompression, + StripeSize: opts.StripeSize, + TruncateFrequency: time.Duration(opts.TruncateFrequency), + MinWALTime: durationToInt64Millis(time.Duration(opts.MinWALTime)), + MaxWALTime: durationToInt64Millis(time.Duration(opts.MaxWALTime)), + } +} + func initTracing(logger log.Logger) (io.Closer, error) { // Set tracing configuration defaults. cfg := &jcfg.Configuration{ diff --git a/cmd/prometheus/main_test.go b/cmd/prometheus/main_test.go index 98b2dd9cf9..6f9a1d566b 100644 --- a/cmd/prometheus/main_test.go +++ b/cmd/prometheus/main_test.go @@ -347,3 +347,23 @@ func getCurrentGaugeValuesFor(t *testing.T, reg prometheus.Gatherer, metricNames } return res } + +func TestAgentSuccessfulStartup(t *testing.T) { + prom := exec.Command(promPath, "-test.main", "--agent", "--config.file="+promConfig) + err := prom.Start() + require.NoError(t, err) + + expectedExitStatus := 0 + actualExitStatus := 0 + + done := make(chan error, 1) + go func() { done <- prom.Wait() }() + select { + case err := <-done: + t.Logf("prometheus agent should be still running: %v", err) + actualExitStatus = prom.ProcessState.ExitCode() + case <-time.After(5 * time.Second): + prom.Process.Kill() + } + require.Equal(t, expectedExitStatus, actualExitStatus) +} diff --git a/storage/remote/storage.go b/storage/remote/storage.go index 5716605a8d..df20af7134 100644 --- a/storage/remote/storage.go +++ b/storage/remote/storage.go @@ -179,6 +179,11 @@ func (s *Storage) Appender(ctx context.Context) storage.Appender { return s.rws.Appender(ctx) } +// LowestSentTimestamp returns the lowest sent timestamp across all queues. +func (s *Storage) LowestSentTimestamp() int64 { + return s.rws.LowestSentTimestamp() +} + // Close the background processing of the storage queues. 
func (s *Storage) Close() error { s.mtx.Lock() diff --git a/storage/remote/write.go b/storage/remote/write.go index b3fec364a7..b56d258d7b 100644 --- a/storage/remote/write.go +++ b/storage/remote/write.go @@ -16,6 +16,7 @@ package remote import ( "context" "fmt" + "math" "sync" "time" @@ -207,6 +208,26 @@ func (rws *WriteStorage) Appender(_ context.Context) storage.Appender { } } +// LowestSentTimestamp returns the lowest sent timestamp across all queues. +func (rws *WriteStorage) LowestSentTimestamp() int64 { + rws.mtx.Lock() + defer rws.mtx.Unlock() + + var lowestTs int64 = math.MaxInt64 + + for _, q := range rws.queues { + ts := int64(q.metrics.highestSentTimestamp.Get() * 1000) + if ts < lowestTs { + lowestTs = ts + } + } + if len(rws.queues) == 0 { + lowestTs = 0 + } + + return lowestTs +} + // Close closes the WriteStorage. func (rws *WriteStorage) Close() error { rws.mtx.Lock() diff --git a/tsdb/agent/db.go b/tsdb/agent/db.go new file mode 100644 index 0000000000..40e192f5c9 --- /dev/null +++ b/tsdb/agent/db.go @@ -0,0 +1,761 @@ +// Copyright 2021 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package agent + +import ( + "context" + "fmt" + "path/filepath" + "sync" + "time" + + "github.com/go-kit/log" + "github.com/go-kit/log/level" + "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/pkg/exemplar" + "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/pkg/timestamp" + "github.com/prometheus/prometheus/storage" + "github.com/prometheus/prometheus/storage/remote" + "github.com/prometheus/prometheus/tsdb" + "github.com/prometheus/prometheus/tsdb/record" + "github.com/prometheus/prometheus/tsdb/wal" + "go.uber.org/atomic" +) + +var ( + ErrUnsupported = errors.New("unsupported operation with WAL-only storage") +) + +// Default values for options. +var ( + DefaultTruncateFrequency = 2 * time.Hour + DefaultMinWALTime = int64(5 * time.Minute / time.Millisecond) + DefaultMaxWALTime = int64(4 * time.Hour / time.Millisecond) +) + +// Options of the WAL storage. +type Options struct { + // Segments (wal files) max size. + // WALSegmentSize <= 0, segment size is default size. + // WALSegmentSize > 0, segment size is WALSegmentSize. + WALSegmentSize int + + // WALCompression will turn on Snappy compression for records on the WAL. + WALCompression bool + + // StripeSize is the size (power of 2) in entries of the series hash map. Reducing the size will save memory but impact performance. + StripeSize int + + // TruncateFrequency determines how frequently to truncate data from the WAL. + TruncateFrequency time.Duration + + // Shortest and longest amount of time data can exist in the WAL before being + // deleted. + MinWALTime, MaxWALTime int64 +} + +// DefaultOptions used for the WAL storage. They are sane for setups using +// millisecond-precision timestamps. 
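+// MinWALTime and MaxWALTime are millisecond durations (5m and 4h by default).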
+func DefaultOptions() *Options { + return &Options{ + WALSegmentSize: wal.DefaultSegmentSize, + WALCompression: false, + StripeSize: tsdb.DefaultStripeSize, + TruncateFrequency: DefaultTruncateFrequency, + MinWALTime: DefaultMinWALTime, + MaxWALTime: DefaultMaxWALTime, + } +} + +type dbMetrics struct { + r prometheus.Registerer + + numActiveSeries prometheus.Gauge + numWALSeriesPendingDeletion prometheus.Gauge + totalAppendedSamples prometheus.Counter + walTruncateDuration prometheus.Summary + walCorruptionsTotal prometheus.Counter + walTotalReplayDuration prometheus.Gauge + checkpointDeleteFail prometheus.Counter + checkpointDeleteTotal prometheus.Counter + checkpointCreationFail prometheus.Counter + checkpointCreationTotal prometheus.Counter +} + +func newDBMetrics(r prometheus.Registerer) *dbMetrics { + m := dbMetrics{r: r} + m.numActiveSeries = prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "prometheus_agent_active_series", + Help: "Number of active series being tracked by the WAL storage", + }) + + m.numWALSeriesPendingDeletion = prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "prometheus_agent_deleted_series", + Help: "Number of series pending deletion from the WAL", + }) + + m.totalAppendedSamples = prometheus.NewCounter(prometheus.CounterOpts{ + Name: "prometheus_agent_samples_appended_total", + Help: "Total number of samples appended to the storage", + }) + + m.walTruncateDuration = prometheus.NewSummary(prometheus.SummaryOpts{ + Name: "prometheus_agent_truncate_duration_seconds", + Help: "Duration of WAL truncation.", + }) + + m.walCorruptionsTotal = prometheus.NewCounter(prometheus.CounterOpts{ + Name: "prometheus_agent_corruptions_total", + Help: "Total number of WAL corruptions.", + }) + + m.walTotalReplayDuration = prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "prometheus_agent_data_replay_duration_seconds", + Help: "Time taken to replay the data on disk.", + }) + + m.checkpointDeleteFail = prometheus.NewCounter(prometheus.CounterOpts{ + Name: "prometheus_agent_checkpoint_deletions_failed_total", + Help: "Total number of checkpoint deletions that failed.", + }) + + m.checkpointDeleteTotal = prometheus.NewCounter(prometheus.CounterOpts{ + Name: "prometheus_agent_checkpoint_deletions_total", + Help: "Total number of checkpoint deletions attempted.", + }) + + m.checkpointCreationFail = prometheus.NewCounter(prometheus.CounterOpts{ + Name: "prometheus_agent_checkpoint_creations_failed_total", + Help: "Total number of checkpoint creations that failed.", + }) + + m.checkpointCreationTotal = prometheus.NewCounter(prometheus.CounterOpts{ + Name: "prometheus_agent_checkpoint_creations_total", + Help: "Total number of checkpoint creations attempted.", + }) + + if r != nil { + r.MustRegister( + m.numActiveSeries, + m.numWALSeriesPendingDeletion, + m.totalAppendedSamples, + m.walTruncateDuration, + m.walCorruptionsTotal, + m.walTotalReplayDuration, + m.checkpointDeleteFail, + m.checkpointDeleteTotal, + m.checkpointCreationFail, + m.checkpointCreationTotal, + ) + } + + return &m +} + +func (m *dbMetrics) Unregister() { + if m.r == nil { + return + } + cs := []prometheus.Collector{ + m.numActiveSeries, + m.numWALSeriesPendingDeletion, + m.totalAppendedSamples, + } + for _, c := range cs { + m.r.Unregister(c) + } +} + +// DB represents a WAL-only storage. It implements storage.DB. 
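+// It exposes no queriers (reads return ErrUnsupported); data leaves the +// process only via remote_write.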
+type DB struct { + mtx sync.RWMutex + logger log.Logger + opts *Options + rs *remote.Storage + + wal *wal.WAL + + appenderPool sync.Pool + bufPool sync.Pool + + nextRef *atomic.Uint64 + series *stripeSeries + // deleted is a map of (ref IDs that should be deleted from WAL) to (the WAL segment they + // must be kept around to). + deleted map[uint64]int + + donec chan struct{} + stopc chan struct{} + + metrics *dbMetrics +} + +// Open returns a new agent.DB in the given directory. +func Open(l log.Logger, reg prometheus.Registerer, rs *remote.Storage, dir string, opts *Options) (*DB, error) { + opts = validateOptions(opts) + + // remote_write expects WAL to be stored in a "wal" subdirectory of the main storage. + dir = filepath.Join(dir, "wal") + + w, err := wal.NewSize(l, reg, dir, opts.WALSegmentSize, opts.WALCompression) + if err != nil { + return nil, errors.Wrap(err, "creating WAL") + } + + db := &DB{ + logger: l, + opts: opts, + rs: rs, + + wal: w, + + nextRef: atomic.NewUint64(0), + series: newStripeSeries(opts.StripeSize), + deleted: make(map[uint64]int), + + donec: make(chan struct{}), + stopc: make(chan struct{}), + + metrics: newDBMetrics(reg), + } + + db.bufPool.New = func() interface{} { + return make([]byte, 0, 1024) + } + + db.appenderPool.New = func() interface{} { + return &appender{ + DB: db, + pendingSeries: make([]record.RefSeries, 0, 100), + pendingSamples: make([]record.RefSample, 0, 100), + } + } + + if err := db.replayWAL(); err != nil { + level.Warn(db.logger).Log("msg", "encountered WAL read error, attempting repair", "err", err) + if err := w.Repair(err); err != nil { + return nil, errors.Wrap(err, "repair corrupted WAL") + } + } + + go db.run() + return db, nil +} + +func validateOptions(opts *Options) *Options { + if opts == nil { + opts = DefaultOptions() + } + if opts.WALSegmentSize <= 0 { + opts.WALSegmentSize = wal.DefaultSegmentSize + } + + // Revert Stripesize to DefaultStripsize if Stripsize is either 0 or not a power of 2. + if opts.StripeSize <= 0 || ((opts.StripeSize & (opts.StripeSize - 1)) != 0) { + opts.StripeSize = tsdb.DefaultStripeSize + } + if opts.TruncateFrequency <= 0 { + opts.TruncateFrequency = DefaultTruncateFrequency + } + if opts.MinWALTime <= 0 { + opts.MinWALTime = 0 + } + if opts.MaxWALTime <= 0 { + opts.MaxWALTime = DefaultMaxWALTime + } + + if t := int64(opts.TruncateFrequency * time.Hour / time.Millisecond); opts.MaxWALTime < t { + opts.MaxWALTime = t + } + return opts +} + +func (db *DB) replayWAL() error { + level.Info(db.logger).Log("msg", "replaying WAL, this may take a while", "dir", db.wal.Dir()) + start := time.Now() + + dir, startFrom, err := wal.LastCheckpoint(db.wal.Dir()) + if err != nil && err != record.ErrNotFound { + return errors.Wrap(err, "find last checkpoint") + } + + multiRef := map[uint64]uint64{} + + if err == nil { + sr, err := wal.NewSegmentsReader(dir) + if err != nil { + return errors.Wrap(err, "open checkpoint") + } + defer func() { + if err := sr.Close(); err != nil { + level.Warn(db.logger).Log("msg", "error while closing the wal segments reader", "err", err) + } + }() + + // A corrupted checkpoint is a hard error for now and requires user + // intervention. There's likely little data that can be recovered anyway. + if err := db.loadWAL(wal.NewReader(sr), multiRef); err != nil { + return errors.Wrap(err, "backfill checkpoint") + } + startFrom++ + level.Info(db.logger).Log("msg", "WAL checkpoint loaded") + } + + // Find the last segment. 
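+ // (wal.Segments reports the lowest and highest segment index present + // in the WAL directory.)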
+ _, last, err := wal.Segments(db.wal.Dir()) + if err != nil { + return errors.Wrap(err, "finding WAL segments") + } + + // Backfil segments from the most recent checkpoint onwards. + for i := startFrom; i <= last; i++ { + seg, err := wal.OpenReadSegment(wal.SegmentName(db.wal.Dir(), i)) + if err != nil { + return errors.Wrap(err, fmt.Sprintf("open WAL segment: %d", i)) + } + + sr := wal.NewSegmentBufReader(seg) + err = db.loadWAL(wal.NewReader(sr), multiRef) + if err := sr.Close(); err != nil { + level.Warn(db.logger).Log("msg", "error while closing the wal segments reader", "err", err) + } + if err != nil { + return err + } + level.Info(db.logger).Log("msg", "WAL segment loaded", "segment", i, "maxSegment", last) + } + + walReplayDuration := time.Since(start) + db.metrics.walTotalReplayDuration.Set(walReplayDuration.Seconds()) + + return nil +} + +func (db *DB) loadWAL(r *wal.Reader, multiRef map[uint64]uint64) (err error) { + var ( + dec record.Decoder + lastRef uint64 + + decoded = make(chan interface{}, 10) + errCh = make(chan error, 1) + seriesPool = sync.Pool{ + New: func() interface{} { + return []record.RefSeries{} + }, + } + samplesPool = sync.Pool{ + New: func() interface{} { + return []record.RefSample{} + }, + } + ) + + go func() { + defer close(decoded) + for r.Next() { + rec := r.Record() + switch dec.Type(rec) { + case record.Series: + series := seriesPool.Get().([]record.RefSeries)[:0] + series, err = dec.Series(rec, series) + if err != nil { + errCh <- &wal.CorruptionErr{ + Err: errors.Wrap(err, "decode series"), + Segment: r.Segment(), + Offset: r.Offset(), + } + return + } + decoded <- series + case record.Samples: + samples := samplesPool.Get().([]record.RefSample)[:0] + samples, err = dec.Samples(rec, samples) + if err != nil { + errCh <- &wal.CorruptionErr{ + Err: errors.Wrap(err, "decode samples"), + Segment: r.Segment(), + Offset: r.Offset(), + } + return + } + decoded <- samples + case record.Tombstones: + // We don't care about tombstones + continue + case record.Exemplars: + // We don't care about exemplars + continue + default: + errCh <- &wal.CorruptionErr{ + Err: errors.Errorf("invalid record type %v", dec.Type(rec)), + Segment: r.Segment(), + Offset: r.Offset(), + } + } + } + }() + + var nonExistentSeriesRefs atomic.Uint64 + + for d := range decoded { + switch v := d.(type) { + case []record.RefSeries: + for _, entry := range v { + // If this is a new series, create it in memory. If we never read in a + // sample for this series, its timestamp will remain at 0 and it will + // be deleted at the next GC. 
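+ // (Sample records replayed below bump lastTs for series that are + // still receiving data.)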
+ if db.series.GetByID(entry.Ref) == nil { + series := &memSeries{ref: entry.Ref, lset: entry.Labels, lastTs: 0} + db.series.Set(entry.Labels.Hash(), series) + multiRef[entry.Ref] = series.ref + db.metrics.numActiveSeries.Inc() + if entry.Ref > lastRef { + lastRef = entry.Ref + } + } + } + + //nolint:staticcheck + seriesPool.Put(v) + case []record.RefSample: + for _, entry := range v { + // Update the lastTs for the series based + ref, ok := multiRef[entry.Ref] + if !ok { + nonExistentSeriesRefs.Inc() + continue + } + series := db.series.GetByID(ref) + if entry.T > series.lastTs { + series.lastTs = entry.T + } + } + + //nolint:staticcheck + samplesPool.Put(v) + default: + panic(fmt.Errorf("unexpected decoded type: %T", d)) + } + } + + if v := nonExistentSeriesRefs.Load(); v > 0 { + level.Warn(db.logger).Log("msg", "found sample referencing non-existing series", "skipped_series", v) + } + + db.nextRef.Store(lastRef) + + select { + case err := <-errCh: + return err + default: + if r.Err() != nil { + return errors.Wrap(r.Err(), "read records") + } + return nil + } +} + +func (db *DB) run() { + defer close(db.donec) + +Loop: + for { + select { + case <-db.stopc: + break Loop + case <-time.After(db.opts.TruncateFrequency): + // The timestamp ts is used to determine which series are not receiving + // samples and may be deleted from the WAL. Their most recent append + // timestamp is compared to ts, and if that timestamp is older then ts, + // they are considered inactive and may be deleted. + // + // Subtracting a duration from ts will add a buffer for when series are + // considered inactive and safe for deletion. + ts := db.rs.LowestSentTimestamp() - db.opts.MinWALTime + if ts < 0 { + ts = 0 + } + + // Network issues can prevent the result of getRemoteWriteTimestamp from + // changing. We don't want data in the WAL to grow forever, so we set a cap + // on the maximum age data can be. If our ts is older than this cutoff point, + // we'll shift it forward to start deleting very stale data. + if maxTS := timestamp.FromTime(time.Now()) - db.opts.MaxWALTime; ts < maxTS { + ts = maxTS + } + + level.Debug(db.logger).Log("msg", "truncating the WAL", "ts", ts) + if err := db.truncate(ts); err != nil { + level.Warn(db.logger).Log("msg", "failed to truncate WAL", "err", err) + } + } + } +} + +func (db *DB) truncate(mint int64) error { + db.mtx.RLock() + defer db.mtx.RUnlock() + + start := time.Now() + + db.gc(mint) + level.Info(db.logger).Log("msg", "series GC completed", "duration", time.Since(start)) + + first, last, err := wal.Segments(db.wal.Dir()) + if err != nil { + return errors.Wrap(err, "get segment range") + } + + // Start a new segment so low ingestion volume instances don't have more WAL + // than needed. + err = db.wal.NextSegment() + if err != nil { + return errors.Wrap(err, "next segment") + } + + last-- // Never consider most recent segment for checkpoint + if last < 0 { + return nil // no segments yet + } + + // The lower two-thirds of segments should contain mostly obsolete samples. + // If we have less than two segments, it's not worth checkpointing yet. 
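+ // For example, with first=0 and last=9 this checkpoints segments 0 + // through 6 and truncates the WAL from segment 7 onwards.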
+ last = first + (last-first)*2/3 + if last <= first { + return nil + } + + keep := func(id uint64) bool { + if db.series.GetByID(id) != nil { + return true + } + + seg, ok := db.deleted[id] + return ok && seg >= first + } + + db.metrics.checkpointCreationTotal.Inc() + + if _, err = wal.Checkpoint(db.logger, db.wal, first, last, keep, mint); err != nil { + db.metrics.checkpointCreationFail.Inc() + if _, ok := errors.Cause(err).(*wal.CorruptionErr); ok { + db.metrics.walCorruptionsTotal.Inc() + } + return errors.Wrap(err, "create checkpoint") + } + if err := db.wal.Truncate(last + 1); err != nil { + // If truncating fails, we'll just try it again at the next checkpoint. + // Leftover segments will still just be ignored in the future if there's a + // checkpoint that supersedes them. + level.Error(db.logger).Log("msg", "truncating segments failed", "err", err) + } + + // The checkpoint is written and segments before it are truncated, so we + // no longer need to track deleted series that were being kept around. + for ref, segment := range db.deleted { + if segment < first { + delete(db.deleted, ref) + } + } + db.metrics.checkpointDeleteTotal.Inc() + db.metrics.numWALSeriesPendingDeletion.Set(float64(len(db.deleted))) + + if err := wal.DeleteCheckpoints(db.wal.Dir(), last); err != nil { + // Leftover old checkpoints do not cause problems down the line beyond + // occupying disk space. They will just be ignored since a newer checkpoint + // exists. + level.Error(db.logger).Log("msg", "delete old checkpoints", "err", err) + db.metrics.checkpointDeleteFail.Inc() + } + + db.metrics.walTruncateDuration.Observe(time.Since(start).Seconds()) + + level.Info(db.logger).Log("msg", "WAL checkpoint complete", "first", first, "last", last, "duration", time.Since(start)) + return nil +} + +// gc marks ref IDs that have not received a sample since mint as deleted in +// s.deleted, along with the segment where they originally got deleted. +func (db *DB) gc(mint int64) { + deleted := db.series.GC(mint) + db.metrics.numActiveSeries.Sub(float64(len(deleted))) + + _, last, _ := wal.Segments(db.wal.Dir()) + + // We want to keep series records for any newly deleted series + // until we've passed the last recorded segment. This prevents + // the WAL having samples for series records that no longer exist. + for ref := range deleted { + db.deleted[ref] = last + } + + db.metrics.numWALSeriesPendingDeletion.Set(float64(len(db.deleted))) +} + +// StartTime implements the Storage interface. +func (db *DB) StartTime() (int64, error) { + return int64(model.Latest), nil +} + +// Querier implements the Storage interface. +func (db *DB) Querier(ctx context.Context, mint, maxt int64) (storage.Querier, error) { + return nil, ErrUnsupported +} + +// ChunkQuerier implements the Storage interface. +func (db *DB) ChunkQuerier(ctx context.Context, mint, maxt int64) (storage.ChunkQuerier, error) { + return nil, ErrUnsupported +} + +// ExemplarQuerier implements the Storage interface. +func (db *DB) ExemplarQuerier(ctx context.Context) (storage.ExemplarQuerier, error) { + return nil, ErrUnsupported +} + +// Appender implements storage.Storage. +func (db *DB) Appender(_ context.Context) storage.Appender { + return db.appenderPool.Get().(storage.Appender) +} + +// Close implements the Storage interface. 
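+// It stops the truncation loop, unregisters the metrics, and closes the WAL.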
+func (db *DB) Close() error { + db.mtx.Lock() + defer db.mtx.Unlock() + + close(db.stopc) + <-db.donec + + db.metrics.Unregister() + + return db.wal.Close() +} + +type appender struct { + *DB + + pendingSeries []record.RefSeries + pendingSamples []record.RefSample +} + +func (a *appender) Append(ref uint64, l labels.Labels, t int64, v float64) (uint64, error) { + if ref == 0 { + return a.Add(l, t, v) + } + return ref, a.AddFast(ref, t, v) +} + +func (a *appender) Add(l labels.Labels, t int64, v float64) (uint64, error) { + hash := l.Hash() + series := a.series.GetByHash(hash, l) + if series != nil { + return series.ref, a.AddFast(series.ref, t, v) + } + + // Ensure no empty or duplicate labels have gotten through. This mirrors the + // equivalent validation code in the TSDB's headAppender. + l = l.WithoutEmpty() + if len(l) == 0 { + return 0, errors.Wrap(tsdb.ErrInvalidSample, "empty labelset") + } + + if lbl, dup := l.HasDuplicateLabelNames(); dup { + return 0, errors.Wrap(tsdb.ErrInvalidSample, fmt.Sprintf(`label name "%s" is not unique`, lbl)) + } + + ref := a.nextRef.Inc() + series = &memSeries{ref: ref, lset: l, lastTs: t} + + a.pendingSeries = append(a.pendingSeries, record.RefSeries{ + Ref: ref, + Labels: l, + }) + a.pendingSamples = append(a.pendingSamples, record.RefSample{ + Ref: ref, + T: t, + V: v, + }) + + a.series.Set(hash, series) + + a.metrics.numActiveSeries.Inc() + a.metrics.totalAppendedSamples.Inc() + + return series.ref, nil +} + +func (a *appender) AddFast(ref uint64, t int64, v float64) error { + series := a.series.GetByID(ref) + if series == nil { + return storage.ErrNotFound + } + series.Lock() + defer series.Unlock() + + // Update last recorded timestamp. Used by Storage.gc to determine if a + // series is dead. + series.lastTs = t + + a.pendingSamples = append(a.pendingSamples, record.RefSample{ + Ref: ref, + T: t, + V: v, + }) + + a.metrics.totalAppendedSamples.Inc() + return nil +} + +func (a *appender) AppendExemplar(ref uint64, l labels.Labels, e exemplar.Exemplar) (uint64, error) { + // remote_write doesn't support exemplars yet, so do nothing here. + return 0, nil +} + +// Commit submits the collected samples and purges the batch. +func (a *appender) Commit() error { + a.mtx.RLock() + defer a.mtx.RUnlock() + + var encoder record.Encoder + buf := a.bufPool.Get().([]byte) + + if len(a.pendingSeries) > 0 { + buf = encoder.Series(a.pendingSeries, buf) + if err := a.wal.Log(buf); err != nil { + return err + } + buf = buf[:0] + } + + if len(a.pendingSamples) > 0 { + buf = encoder.Samples(a.pendingSamples, buf) + if err := a.wal.Log(buf); err != nil { + return err + } + buf = buf[:0] + } + + //nolint:staticcheck + a.bufPool.Put(buf) + return a.Rollback() +} + +func (a *appender) Rollback() error { + a.pendingSeries = a.pendingSeries[:0] + a.pendingSamples = a.pendingSamples[:0] + a.appenderPool.Put(a) + return nil +} diff --git a/tsdb/agent/db_test.go b/tsdb/agent/db_test.go new file mode 100644 index 0000000000..e1a6106a36 --- /dev/null +++ b/tsdb/agent/db_test.go @@ -0,0 +1,449 @@ +// Copyright 2021 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package agent + +import ( + "context" + "io/ioutil" + "os" + "strconv" + "sync" + "testing" + "time" + + "github.com/go-kit/log" + + "github.com/prometheus/client_golang/prometheus" + dto "github.com/prometheus/client_model/go" + "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/storage/remote" + "github.com/prometheus/prometheus/tsdb/record" + "github.com/prometheus/prometheus/tsdb/tsdbutil" + "github.com/prometheus/prometheus/tsdb/wal" + "github.com/stretchr/testify/require" +) + +func TestUnsupported(t *testing.T) { + promAgentDir, err := ioutil.TempDir("", "TestUnsupported") + require.NoError(t, err) + t.Cleanup(func() { + require.NoError(t, os.RemoveAll(promAgentDir)) + }) + + opts := DefaultOptions() + logger := log.NewNopLogger() + + s, err := Open(logger, prometheus.DefaultRegisterer, nil, promAgentDir, opts) + if err != nil { + t.Fatalf("unable to create storage for the agent: %v", err) + } + defer s.Close() + + t.Run("Querier", func(t *testing.T) { + _, err := s.Querier(context.TODO(), 0, 0) + require.Equal(t, err, ErrUnsupported) + }) + + t.Run("ChunkQuerier", func(t *testing.T) { + _, err := s.ChunkQuerier(context.TODO(), 0, 0) + require.Equal(t, err, ErrUnsupported) + }) + + t.Run("ExemplarQuerier", func(t *testing.T) { + _, err := s.ExemplarQuerier(context.TODO()) + require.Equal(t, err, ErrUnsupported) + }) +} + +func TestCommit(t *testing.T) { + const ( + numDatapoints = 1000 + numSeries = 8 + ) + + promAgentDir, err := ioutil.TempDir("", t.Name()) + require.NoError(t, err) + t.Cleanup(func() { + require.NoError(t, os.RemoveAll(promAgentDir)) + }) + + lbls := labelsForTest(t.Name(), numSeries) + opts := DefaultOptions() + logger := log.NewNopLogger() + reg := prometheus.NewRegistry() + remoteStorage := remote.NewStorage(log.With(logger, "component", "remote"), reg, startTime, promAgentDir, time.Second*30, nil) + + s, err := Open(logger, reg, remoteStorage, promAgentDir, opts) + if err != nil { + t.Fatalf("unable to create storage for the agent: %v", err) + } + + a := s.Appender(context.TODO()) + + for _, l := range lbls { + lset := labels.New(l...) + + for i := 0; i < numDatapoints; i++ { + sample := tsdbutil.GenerateSamples(0, 1) + _, err := a.Append(0, lset, sample[0].T(), sample[0].V()) + require.NoError(t, err) + } + require.NoError(t, a.Commit()) + } + + // Read records from WAL and check for expected count of series and samples. 
+ walSeriesCount := 0
+ walSamplesCount := 0
+
+ reg = prometheus.NewRegistry()
+ remoteStorage = remote.NewStorage(log.With(logger, "component", "remote"), reg, startTime, promAgentDir, time.Second*30, nil)
+
+ s, err = Open(logger, nil, remoteStorage, promAgentDir, opts)
+ if err != nil {
+ t.Fatalf("unable to create storage for the agent: %v", err)
+ }
+
+ var dec record.Decoder
+
+ if err == nil {
+ sr, err := wal.NewSegmentsReader(s.wal.Dir())
+ require.NoError(t, err)
+
+ r := wal.NewReader(sr)
+ seriesPool := sync.Pool{
+ New: func() interface{} {
+ return []record.RefSeries{}
+ },
+ }
+ samplesPool := sync.Pool{
+ New: func() interface{} {
+ return []record.RefSample{}
+ },
+ }
+
+ for r.Next() {
+ rec := r.Record()
+ switch dec.Type(rec) {
+ case record.Series:
+ series := seriesPool.Get().([]record.RefSeries)[:0]
+ series, _ = dec.Series(rec, series)
+ walSeriesCount += len(series)
+ case record.Samples:
+ samples := samplesPool.Get().([]record.RefSample)[:0]
+ samples, _ = dec.Samples(rec, samples)
+ walSamplesCount += len(samples)
+ default:
+ }
+ }
+ }
+
+ // The series count retrieved from the WAL should match the count of series added to the WAL.
+ require.Equal(t, walSeriesCount, numSeries)
+
+ // The samples count retrieved from the WAL should match the count of samples added to the WAL.
+ require.Equal(t, walSamplesCount, numSeries*numDatapoints)
+}
+
+func TestRollback(t *testing.T) {
+ const (
+ numDatapoints = 1000
+ numSeries = 8
+ )
+
+ promAgentDir, err := ioutil.TempDir("", t.Name())
+ require.NoError(t, err)
+ t.Cleanup(func() {
+ require.NoError(t, os.RemoveAll(promAgentDir))
+ })
+
+ lbls := labelsForTest(t.Name(), numSeries)
+ opts := DefaultOptions()
+ logger := log.NewNopLogger()
+ reg := prometheus.NewRegistry()
+ remoteStorage := remote.NewStorage(log.With(logger, "component", "remote"), reg, startTime, promAgentDir, time.Second*30, nil)
+
+ s, err := Open(logger, reg, remoteStorage, promAgentDir, opts)
+ if err != nil {
+ t.Fatalf("unable to create storage for the agent: %v", err)
+ }
+
+ a := s.Appender(context.TODO())
+
+ for _, l := range lbls {
+ lset := labels.New(l...)
+
+ for i := 0; i < numDatapoints; i++ {
+ sample := tsdbutil.GenerateSamples(0, 1)
+ _, err := a.Append(0, lset, sample[0].T(), sample[0].V())
+ require.NoError(t, err)
+ }
+ }
+
+ require.NoError(t, a.Rollback())
+
+ // Read records from WAL and check for expected count of series and samples.
+ walSeriesCount := 0 + walSamplesCount := 0 + + reg = prometheus.NewRegistry() + remoteStorage = remote.NewStorage(log.With(logger, "component", "remote"), reg, startTime, promAgentDir, time.Second*30, nil) + + s, err = Open(logger, nil, remoteStorage, promAgentDir, opts) + if err != nil { + t.Fatalf("unable to create storage for the agent: %v", err) + } + + var dec record.Decoder + + if err == nil { + sr, err := wal.NewSegmentsReader(s.wal.Dir()) + require.NoError(t, err) + + r := wal.NewReader(sr) + seriesPool := sync.Pool{ + New: func() interface{} { + return []record.RefSeries{} + }, + } + samplesPool := sync.Pool{ + New: func() interface{} { + return []record.RefSample{} + }, + } + + for r.Next() { + rec := r.Record() + switch dec.Type(rec) { + case record.Series: + series := seriesPool.Get().([]record.RefSeries)[:0] + series, _ = dec.Series(rec, series) + walSeriesCount += len(series) + case record.Samples: + samples := samplesPool.Get().([]record.RefSample)[:0] + samples, _ = dec.Samples(rec, samples) + walSamplesCount += len(samples) + default: + } + } + } + + // Retrieved series count from WAL should be zero. + require.Equal(t, walSeriesCount, 0) + + // Retrieved samples count from WAL should be zero. + require.Equal(t, walSamplesCount, 0) +} + +func TestFullTruncateWAL(t *testing.T) { + const ( + numDatapoints = 1000 + numSeries = 800 + lastTs = 500 + ) + + promAgentDir, err := ioutil.TempDir("", t.Name()) + require.NoError(t, err) + t.Cleanup(func() { + require.NoError(t, os.RemoveAll(promAgentDir)) + }) + + lbls := labelsForTest(t.Name(), numSeries) + opts := DefaultOptions() + opts.TruncateFrequency = time.Minute * 2 + logger := log.NewNopLogger() + reg := prometheus.NewRegistry() + remoteStorage := remote.NewStorage(log.With(logger, "component", "remote"), reg, startTime, promAgentDir, time.Second*30, nil) + + s, err := Open(logger, reg, remoteStorage, promAgentDir, opts) + if err != nil { + t.Fatalf("unable to create storage for the agent: %v", err) + } + + a := s.Appender(context.TODO()) + + for _, l := range lbls { + lset := labels.New(l...) + + for i := 0; i < numDatapoints; i++ { + _, err := a.Append(0, lset, int64(lastTs), 0) + require.NoError(t, err) + } + require.NoError(t, a.Commit()) + } + + // Truncate WAL with mint to GC all the samples. + s.truncate(lastTs + 1) + + m := gatherFamily(t, reg, "prometheus_agent_deleted_series") + require.Equal(t, float64(numSeries), m.Metric[0].Gauge.GetValue(), "agent wal truncate mismatch of deleted series count") +} + +func TestPartialTruncateWAL(t *testing.T) { + const ( + numDatapoints = 1000 + numSeries = 800 + ) + + promAgentDir, err := ioutil.TempDir("", t.Name()) + require.NoError(t, err) + t.Cleanup(func() { + require.NoError(t, os.RemoveAll(promAgentDir)) + }) + + opts := DefaultOptions() + opts.TruncateFrequency = time.Minute * 2 + logger := log.NewNopLogger() + reg := prometheus.NewRegistry() + remoteStorage := remote.NewStorage(log.With(logger, "component", "remote"), reg, startTime, promAgentDir, time.Second*30, nil) + + s, err := Open(logger, reg, remoteStorage, promAgentDir, opts) + if err != nil { + t.Fatalf("unable to create storage for the agent: %v", err) + } + + a := s.Appender(context.TODO()) + + var lastTs int64 + + // Create first batch of 800 series with 1000 data-points with a fixed lastTs as 500. + lastTs = 500 + lbls := labelsForTest(t.Name()+"batch-1", numSeries) + for _, l := range lbls { + lset := labels.New(l...) 
+ + for i := 0; i < numDatapoints; i++ { + _, err := a.Append(0, lset, lastTs, 0) + require.NoError(t, err) + } + require.NoError(t, a.Commit()) + } + + // Create second batch of 800 series with 1000 data-points with a fixed lastTs as 600. + lastTs = 600 + + lbls = labelsForTest(t.Name()+"batch-2", numSeries) + for _, l := range lbls { + lset := labels.New(l...) + + for i := 0; i < numDatapoints; i++ { + _, err := a.Append(0, lset, lastTs, 0) + require.NoError(t, err) + } + require.NoError(t, a.Commit()) + } + + // Truncate WAL with mint to GC only the first batch of 800 series and retaining 2nd batch of 800 series. + s.truncate(lastTs - 1) + + m := gatherFamily(t, reg, "prometheus_agent_deleted_series") + require.Equal(t, m.Metric[0].Gauge.GetValue(), float64(numSeries), "agent wal truncate mismatch of deleted series count") +} + +func TestWALReplay(t *testing.T) { + const ( + numDatapoints = 1000 + numSeries = 8 + lastTs = 500 + ) + + promAgentDir, err := ioutil.TempDir("", t.Name()) + require.NoError(t, err) + t.Cleanup(func() { + require.NoError(t, os.RemoveAll(promAgentDir)) + }) + + lbls := labelsForTest(t.Name(), numSeries) + opts := DefaultOptions() + + logger := log.NewNopLogger() + reg := prometheus.NewRegistry() + remoteStorage := remote.NewStorage(log.With(logger, "component", "remote"), reg, startTime, promAgentDir, time.Second*30, nil) + + s, err := Open(logger, reg, remoteStorage, promAgentDir, opts) + if err != nil { + t.Fatalf("unable to create storage for the agent: %v", err) + } + + a := s.Appender(context.TODO()) + + for _, l := range lbls { + lset := labels.New(l...) + + for i := 0; i < numDatapoints; i++ { + _, err := a.Append(0, lset, lastTs, 0) + require.NoError(t, err) + } + } + + require.NoError(t, a.Commit()) + + restartOpts := DefaultOptions() + restartLogger := log.NewNopLogger() + restartReg := prometheus.NewRegistry() + + s, err = Open(restartLogger, restartReg, nil, promAgentDir, restartOpts) + if err != nil { + t.Fatalf("unable to create storage for the agent: %v", err) + } + + // Check if all the series are retrieved back from the WAL. + m := gatherFamily(t, restartReg, "prometheus_agent_active_series") + require.Equal(t, float64(numSeries), m.Metric[0].Gauge.GetValue(), "agent wal replay mismatch of active series count") + + // Check if lastTs of the samples retrieved from the WAL is retained. + metrics := s.series.series + for i := 0; i < len(metrics); i++ { + mp := metrics[i] + for _, v := range mp { + require.Equal(t, v.lastTs, int64(lastTs)) + } + } +} + +func startTime() (int64, error) { + return time.Now().Unix() * 1000, nil +} + +// Create series for tests. 
+func labelsForTest(lName string, seriesCount int) []labels.Labels { + var series []labels.Labels + + for i := 0; i < seriesCount; i++ { + lset := labels.Labels{ + {Name: "a", Value: lName}, + {Name: "job", Value: "prometheus"}, + {Name: "instance", Value: "localhost" + strconv.Itoa(i)}, + } + series = append(series, lset) + } + + return series +} + +func gatherFamily(t *testing.T, reg prometheus.Gatherer, familyName string) *dto.MetricFamily { + t.Helper() + + families, err := reg.Gather() + require.NoError(t, err, "failed to gather metrics") + + for _, f := range families { + if f.GetName() == familyName { + return f + } + } + + t.Fatalf("could not find family %s", familyName) + + return nil +} diff --git a/tsdb/agent/series.go b/tsdb/agent/series.go new file mode 100644 index 0000000000..71b3ca2e26 --- /dev/null +++ b/tsdb/agent/series.go @@ -0,0 +1,177 @@ +// Copyright 2021 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package agent + +import ( + "sync" + + "github.com/prometheus/prometheus/pkg/labels" +) + +// memSeries is a chunkless version of tsdb.memSeries. +type memSeries struct { + sync.Mutex + + ref uint64 + lset labels.Labels + lastTs int64 +} + +// seriesHashmap is a simple hashmap for memSeries by their label set. +// It is built on top of a regular hashmap and holds a slice of series to +// resolve hash collisions. Its methods require the hash to be submitted +// with the label set to avoid re-computing hash throughout the code. +type seriesHashmap map[uint64][]*memSeries + +func (m seriesHashmap) Get(hash uint64, lset labels.Labels) *memSeries { + for _, s := range m[hash] { + if labels.Equal(s.lset, lset) { + return s + } + } + return nil +} + +func (m seriesHashmap) Set(hash uint64, s *memSeries) { + seriesSet := m[hash] + for i, prev := range seriesSet { + if labels.Equal(prev.lset, s.lset) { + seriesSet[i] = s + return + } + } + m[hash] = append(seriesSet, s) +} + +func (m seriesHashmap) Delete(hash uint64, ref uint64) { + var rem []*memSeries + for _, s := range m[hash] { + if s.ref != ref { + rem = append(rem, s) + } + } + if len(rem) == 0 { + delete(m, hash) + } else { + m[hash] = rem + } +} + +// stripeSeries locks modulo ranges of IDs and hashes to reduce lock +// contention. The locks are padded to not be on the same cache line. +// Filling the padded space with the maps was profiled to be slower - +// likely due to the additional pointer dereferences. +type stripeSeries struct { + size int + series []map[uint64]*memSeries + hashes []seriesHashmap + locks []stripeLock +} + +type stripeLock struct { + sync.RWMutex + // Padding to avoid multiple locks being on the same cache line. 
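// For context on the 40-byte figure: sync.RWMutex occupies 24 bytes on
// common 64-bit platforms, so the padding below rounds each stripeLock
// up to a typical 64-byte cache line (both sizes are platform
// assumptions rather than guarantees):
//
//	unsafe.Sizeof(sync.RWMutex{}) // 24 bytes on 64-bit platforms
//	24 + 40                       // == 64, one cache line per lock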
+ _ [40]byte +} + +func newStripeSeries(stripeSize int) *stripeSeries { + s := &stripeSeries{ + size: stripeSize, + series: make([]map[uint64]*memSeries, stripeSize), + hashes: make([]seriesHashmap, stripeSize), + locks: make([]stripeLock, stripeSize), + } + for i := range s.series { + s.series[i] = map[uint64]*memSeries{} + } + for i := range s.hashes { + s.hashes[i] = seriesHashmap{} + } + return s +} + +// GC garbage collects old series that have not received a sample after mint +// and will fully delete them. +func (s *stripeSeries) GC(mint int64) map[uint64]struct{} { + deleted := map[uint64]struct{}{} + + for hashLock := 0; hashLock < s.size; hashLock++ { + s.locks[hashLock].Lock() + + for hash, all := range s.hashes[hashLock] { + for _, series := range all { + series.Lock() + + // Any series that has received a write since mint is still alive. + if series.lastTs >= mint { + series.Unlock() + continue + } + + // The series is stale. We need to obtain a second lock for the + // ref if it's different than the hash lock. + refLock := int(series.ref) & (s.size - 1) + if hashLock != refLock { + s.locks[refLock].Lock() + } + + deleted[series.ref] = struct{}{} + delete(s.series[refLock], series.ref) + s.hashes[hashLock].Delete(hash, series.ref) + + if hashLock != refLock { + s.locks[refLock].Unlock() + } + series.Unlock() + } + } + + s.locks[hashLock].Unlock() + } + + return deleted +} + +func (s *stripeSeries) GetByID(id uint64) *memSeries { + refLock := id & uint64(s.size-1) + + s.locks[refLock].RLock() + defer s.locks[refLock].RUnlock() + return s.series[refLock][id] +} + +func (s *stripeSeries) GetByHash(hash uint64, lset labels.Labels) *memSeries { + hashLock := hash & uint64(s.size-1) + + s.locks[hashLock].RLock() + defer s.locks[hashLock].RUnlock() + return s.hashes[hashLock].Get(hash, lset) +} + +func (s *stripeSeries) Set(hash uint64, series *memSeries) { + var ( + hashLock = hash & uint64(s.size-1) + refLock = series.ref & uint64(s.size-1) + ) + s.locks[hashLock].Lock() + defer s.locks[hashLock].Unlock() + + if hashLock != refLock { + s.locks[refLock].Lock() + defer s.locks[refLock].Unlock() + } + + s.hashes[hashLock].Set(hash, series) + s.series[refLock][series.ref] = series +} diff --git a/web/api/v1/api.go b/web/api/v1/api.go index 26ce55c8b3..4d85a56885 100644 --- a/web/api/v1/api.go +++ b/web/api/v1/api.go @@ -181,6 +181,7 @@ type API struct { buildInfo *PrometheusVersion runtimeInfo func() (RuntimeInfo, error) gatherer prometheus.Gatherer + isAgent bool remoteWriteHandler http.Handler remoteReadHandler http.Handler @@ -211,6 +212,7 @@ func NewAPI( remoteReadSampleLimit int, remoteReadConcurrencyLimit int, remoteReadMaxBytesInFrame int, + isAgent bool, CORSOrigin *regexp.Regexp, runtimeInfo func() (RuntimeInfo, error), buildInfo *PrometheusVersion, @@ -239,6 +241,7 @@ func NewAPI( runtimeInfo: runtimeInfo, buildInfo: buildInfo, gatherer: gatherer, + isAgent: isAgent, remoteReadHandler: remote.NewReadHandler(logger, registerer, q, configFunc, remoteReadSampleLimit, remoteReadConcurrencyLimit, remoteReadMaxBytesInFrame), } @@ -282,26 +285,35 @@ func (api *API) Register(r *route.Router) { }.ServeHTTP) } + wrapAgent := func(f apiFunc) http.HandlerFunc { + return wrap(func(r *http.Request) apiFuncResult { + if api.isAgent { + return apiFuncResult{nil, &apiError{errorExec, errors.New("unavailable with Prometheus Agent")}, nil, nil} + } + return f(r) + }) + } + r.Options("/*path", wrap(api.options)) - r.Get("/query", wrap(api.query)) - r.Post("/query", wrap(api.query)) - 
r.Get("/query_range", wrap(api.queryRange)) - r.Post("/query_range", wrap(api.queryRange)) - r.Get("/query_exemplars", wrap(api.queryExemplars)) - r.Post("/query_exemplars", wrap(api.queryExemplars)) + r.Get("/query", wrapAgent(api.query)) + r.Post("/query", wrapAgent(api.query)) + r.Get("/query_range", wrapAgent(api.queryRange)) + r.Post("/query_range", wrapAgent(api.queryRange)) + r.Get("/query_exemplars", wrapAgent(api.queryExemplars)) + r.Post("/query_exemplars", wrapAgent(api.queryExemplars)) - r.Get("/labels", wrap(api.labelNames)) - r.Post("/labels", wrap(api.labelNames)) - r.Get("/label/:name/values", wrap(api.labelValues)) + r.Get("/labels", wrapAgent(api.labelNames)) + r.Post("/labels", wrapAgent(api.labelNames)) + r.Get("/label/:name/values", wrapAgent(api.labelValues)) - r.Get("/series", wrap(api.series)) - r.Post("/series", wrap(api.series)) - r.Del("/series", wrap(api.dropSeries)) + r.Get("/series", wrapAgent(api.series)) + r.Post("/series", wrapAgent(api.series)) + r.Del("/series", wrapAgent(api.dropSeries)) r.Get("/targets", wrap(api.targets)) r.Get("/targets/metadata", wrap(api.targetMetadata)) - r.Get("/alertmanagers", wrap(api.alertmanagers)) + r.Get("/alertmanagers", wrapAgent(api.alertmanagers)) r.Get("/metadata", wrap(api.metricMetadata)) @@ -309,22 +321,22 @@ func (api *API) Register(r *route.Router) { r.Get("/status/runtimeinfo", wrap(api.serveRuntimeInfo)) r.Get("/status/buildinfo", wrap(api.serveBuildInfo)) r.Get("/status/flags", wrap(api.serveFlags)) - r.Get("/status/tsdb", wrap(api.serveTSDBStatus)) + r.Get("/status/tsdb", wrapAgent(api.serveTSDBStatus)) r.Get("/status/walreplay", api.serveWALReplayStatus) r.Post("/read", api.ready(api.remoteRead)) r.Post("/write", api.ready(api.remoteWrite)) - r.Get("/alerts", wrap(api.alerts)) - r.Get("/rules", wrap(api.rules)) + r.Get("/alerts", wrapAgent(api.alerts)) + r.Get("/rules", wrapAgent(api.rules)) // Admin APIs - r.Post("/admin/tsdb/delete_series", wrap(api.deleteSeries)) - r.Post("/admin/tsdb/clean_tombstones", wrap(api.cleanTombstones)) - r.Post("/admin/tsdb/snapshot", wrap(api.snapshot)) + r.Post("/admin/tsdb/delete_series", wrapAgent(api.deleteSeries)) + r.Post("/admin/tsdb/clean_tombstones", wrapAgent(api.cleanTombstones)) + r.Post("/admin/tsdb/snapshot", wrapAgent(api.snapshot)) - r.Put("/admin/tsdb/delete_series", wrap(api.deleteSeries)) - r.Put("/admin/tsdb/clean_tombstones", wrap(api.cleanTombstones)) - r.Put("/admin/tsdb/snapshot", wrap(api.snapshot)) + r.Put("/admin/tsdb/delete_series", wrapAgent(api.deleteSeries)) + r.Put("/admin/tsdb/clean_tombstones", wrapAgent(api.cleanTombstones)) + r.Put("/admin/tsdb/snapshot", wrapAgent(api.snapshot)) } type queryData struct { diff --git a/web/web.go b/web/web.go index db0069fb6f..269abd0bf0 100644 --- a/web/web.go +++ b/web/web.go @@ -248,6 +248,7 @@ type Options struct { RemoteReadConcurrencyLimit int RemoteReadBytesInFrame int RemoteWriteReceiver bool + IsAgent bool Gatherer prometheus.Gatherer Registerer prometheus.Registerer @@ -328,6 +329,7 @@ func New(logger log.Logger, o *Options) *Handler { h.options.RemoteReadSampleLimit, h.options.RemoteReadConcurrencyLimit, h.options.RemoteReadBytesInFrame, + h.options.IsAgent, h.options.CORSOrigin, h.runtimeInfo, h.versionInfo, diff --git a/web/web_test.go b/web/web_test.go index 399e552663..868fd08e6e 100644 --- a/web/web_test.go +++ b/web/web_test.go @@ -526,6 +526,68 @@ func TestHandleMultipleQuitRequests(t *testing.T) { } } +// Test for availability of API endpoints in Prometheus Agent mode. 
+func TestAgentAPIEndPoints(t *testing.T) {
+ t.Parallel()
+
+ opts := &Options{
+ ListenAddress: ":9090",
+ ReadTimeout: 30 * time.Second,
+ MaxConnections: 512,
+ Context: nil,
+ Storage: nil,
+ QueryEngine: nil,
+ ScrapeManager: &scrape.Manager{},
+ RuleManager: &rules.Manager{},
+ Notifier: nil,
+ RoutePrefix: "/",
+ EnableAdminAPI: true,
+ ExternalURL: &url.URL{
+ Scheme: "http",
+ Host: "localhost:9090",
+ Path: "/",
+ },
+ Version: &PrometheusVersion{},
+ Gatherer: prometheus.DefaultGatherer,
+ IsAgent: true,
+ }
+
+ opts.Flags = map[string]string{}
+
+ webHandler := New(nil, opts)
+ webHandler.Ready()
+
+ // Test the endpoints that are not available in Agent mode.
+ for _, u := range []string{
+ "http://localhost:9090/-/labels",
+ "http://localhost:9090/label",
+ "http://localhost:9090/series",
+ "http://localhost:9090/alertmanagers",
+ "http://localhost:9090/query",
+ "http://localhost:9090/query_range",
+ "http://localhost:9090/query_exemplars",
+ } {
+ w := httptest.NewRecorder()
+ req, err := http.NewRequest("GET", u, nil)
+ require.NoError(t, err)
+ webHandler.router.ServeHTTP(w, req)
+ require.Equal(t, http.StatusNotFound, w.Code)
+ }
+
+ // Test the endpoints that remain available in Agent mode.
+ for _, u := range []string{
+ "http://localhost:9090/targets",
+ "http://localhost:9090/status",
+ } {
+ w := httptest.NewRecorder()
+ req, err := http.NewRequest("GET", u, nil)
+ require.NoError(t, err)
+ webHandler.router.ServeHTTP(w, req)
+ fmt.Println(u)
+ require.Equal(t, http.StatusOK, w.Code)
+ }
+}
+
 func cleanupTestResponse(t *testing.T, resp *http.Response) {
 _, err := io.Copy(ioutil.Discard, resp.Body)
 require.NoError(t, err)

From 5afa606ecbd38f795056cd1300bef42a48121a6a Mon Sep 17 00:00:00 2001
From: Bryan Boreham
Date: Fri, 29 Oct 2021 22:44:40 +0100
Subject: [PATCH 035/161] Remote-write: reuse memory for marshalling (#9412)

By holding a `proto.Buffer` per shard and passing it down to where
marshalling is done, we avoid creating a lot of garbage.

Signed-off-by: Bryan Boreham
---
 storage/remote/codec_test.go | 2 +-
 storage/remote/queue_manager.go | 37 +++++++++++++++++-----------
 storage/remote/write_handler_test.go | 8 +++---
 3 files changed, 27 insertions(+), 20 deletions(-)

diff --git a/storage/remote/codec_test.go b/storage/remote/codec_test.go
index 68cfe9899f..8aad3afeb3 100644
--- a/storage/remote/codec_test.go
+++ b/storage/remote/codec_test.go
@@ -292,7 +292,7 @@ func TestMetricTypeToMetricTypeProto(t *testing.T) {
 }

 func TestDecodeWriteRequest(t *testing.T) {
- buf, _, err := buildWriteRequest(writeRequestFixture.Timeseries, nil, nil)
+ buf, _, err := buildWriteRequest(writeRequestFixture.Timeseries, nil, nil, nil)
 require.NoError(t, err)

 actual, err := DecodeWriteRequest(bytes.NewReader(buf))
diff --git a/storage/remote/queue_manager.go b/storage/remote/queue_manager.go
index 17e57ea85c..9ac3b926b5 100644
--- a/storage/remote/queue_manager.go
+++ b/storage/remote/queue_manager.go
@@ -433,7 +433,7 @@ func NewQueueManager(
 return t
 }

-// AppendMetadata sends metadata the remote storage. Metadata is sent all at once and is not parallelized.
+// AppendMetadata sends metadata to the remote storage. Metadata is sent in batches, but is not parallelized.
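// As a worked example of the batching below (the numbers are illustrative):
// with 1250 metadata entries and MaxSamplesPerSend = 500, numSends =
// ceil(1250/500) = 3 and the slices sent are mm[0:500], mm[500:1000] and
// mm[1000:1250]; the "last > len(metadata)" check clamps the final, short batch.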
func (t *QueueManager) AppendMetadata(ctx context.Context, metadata []scrape.MetricMetadata) { mm := make([]prompb.MetricMetadata, 0, len(metadata)) for _, entry := range metadata { @@ -445,13 +445,14 @@ func (t *QueueManager) AppendMetadata(ctx context.Context, metadata []scrape.Met }) } + pBuf := proto.NewBuffer(nil) numSends := int(math.Ceil(float64(len(metadata)) / float64(t.mcfg.MaxSamplesPerSend))) for i := 0; i < numSends; i++ { last := (i + 1) * t.mcfg.MaxSamplesPerSend if last > len(metadata) { last = len(metadata) } - err := t.sendMetadataWithBackoff(ctx, mm[i*t.mcfg.MaxSamplesPerSend:last]) + err := t.sendMetadataWithBackoff(ctx, mm[i*t.mcfg.MaxSamplesPerSend:last], pBuf) if err != nil { t.metrics.failedMetadataTotal.Add(float64(last - (i * t.mcfg.MaxSamplesPerSend))) level.Error(t.logger).Log("msg", "non-recoverable error while sending metadata", "count", last-(i*t.mcfg.MaxSamplesPerSend), "err", err) @@ -459,9 +460,9 @@ func (t *QueueManager) AppendMetadata(ctx context.Context, metadata []scrape.Met } } -func (t *QueueManager) sendMetadataWithBackoff(ctx context.Context, metadata []prompb.MetricMetadata) error { +func (t *QueueManager) sendMetadataWithBackoff(ctx context.Context, metadata []prompb.MetricMetadata, pBuf *proto.Buffer) error { // Build the WriteRequest with no samples. - req, _, err := buildWriteRequest(nil, metadata, nil) + req, _, err := buildWriteRequest(nil, metadata, pBuf, nil) if err != nil { return err } @@ -1040,7 +1041,8 @@ func (s *shards) runShard(ctx context.Context, shardID int, queue chan interface max = s.qm.cfg.MaxSamplesPerSend nPending, nPendingSamples, nPendingExemplars = 0, 0, 0 - buf []byte + pBuf = proto.NewBuffer(nil) + buf []byte ) if s.qm.sendExemplars { max += int(float64(max) * 0.1) @@ -1084,7 +1086,7 @@ func (s *shards) runShard(ctx context.Context, shardID int, queue chan interface if !ok { if nPendingSamples > 0 || nPendingExemplars > 0 { level.Debug(s.qm.logger).Log("msg", "Flushing data to remote storage...", "samples", nPendingSamples, "exemplars", nPendingExemplars) - s.sendSamples(ctx, pendingData[:nPending], nPendingSamples, nPendingExemplars, &buf) + s.sendSamples(ctx, pendingData[:nPending], nPendingSamples, nPendingExemplars, pBuf, &buf) s.qm.metrics.pendingSamples.Sub(float64(nPendingSamples)) s.qm.metrics.pendingExemplars.Sub(float64(nPendingExemplars)) level.Debug(s.qm.logger).Log("msg", "Done flushing.") @@ -1114,7 +1116,7 @@ func (s *shards) runShard(ctx context.Context, shardID int, queue chan interface } if nPending >= max { - s.sendSamples(ctx, pendingData[:nPending], nPendingSamples, nPendingExemplars, &buf) + s.sendSamples(ctx, pendingData[:nPending], nPendingSamples, nPendingExemplars, pBuf, &buf) s.qm.metrics.pendingSamples.Sub(float64(nPendingSamples)) s.qm.metrics.pendingExemplars.Sub(float64(nPendingExemplars)) nPendingSamples = 0 @@ -1128,7 +1130,7 @@ func (s *shards) runShard(ctx context.Context, shardID int, queue chan interface case <-timer.C: if nPendingSamples > 0 || nPendingExemplars > 0 { level.Debug(s.qm.logger).Log("msg", "runShard timer ticked, sending buffered data", "samples", nPendingSamples, "exemplars", nPendingExemplars, "shard", shardNum) - s.sendSamples(ctx, pendingData[:nPending], nPendingSamples, nPendingExemplars, &buf) + s.sendSamples(ctx, pendingData[:nPending], nPendingSamples, nPendingExemplars, pBuf, &buf) s.qm.metrics.pendingSamples.Sub(float64(nPendingSamples)) s.qm.metrics.pendingExemplars.Sub(float64(nPendingExemplars)) nPendingSamples = 0 @@ -1140,9 +1142,9 @@ func (s *shards) 
runShard(ctx context.Context, shardID int, queue chan interface } } -func (s *shards) sendSamples(ctx context.Context, samples []prompb.TimeSeries, sampleCount int, exemplarCount int, buf *[]byte) { +func (s *shards) sendSamples(ctx context.Context, samples []prompb.TimeSeries, sampleCount int, exemplarCount int, pBuf *proto.Buffer, buf *[]byte) { begin := time.Now() - err := s.sendSamplesWithBackoff(ctx, samples, sampleCount, exemplarCount, buf) + err := s.sendSamplesWithBackoff(ctx, samples, sampleCount, exemplarCount, pBuf, buf) if err != nil { level.Error(s.qm.logger).Log("msg", "non-recoverable error", "count", sampleCount, "exemplarCount", exemplarCount, "err", err) s.qm.metrics.failedSamplesTotal.Add(float64(sampleCount)) @@ -1157,9 +1159,9 @@ func (s *shards) sendSamples(ctx context.Context, samples []prompb.TimeSeries, s } // sendSamples to the remote storage with backoff for recoverable errors. -func (s *shards) sendSamplesWithBackoff(ctx context.Context, samples []prompb.TimeSeries, sampleCount int, exemplarCount int, buf *[]byte) error { +func (s *shards) sendSamplesWithBackoff(ctx context.Context, samples []prompb.TimeSeries, sampleCount int, exemplarCount int, pBuf *proto.Buffer, buf *[]byte) error { // Build the WriteRequest with no metadata. - req, highest, err := buildWriteRequest(samples, nil, *buf) + req, highest, err := buildWriteRequest(samples, nil, pBuf, *buf) if err != nil { // Failing to build the write request is non-recoverable, since it will // only error if marshaling the proto to bytes fails. @@ -1266,7 +1268,7 @@ func sendWriteRequestWithBackoff(ctx context.Context, cfg config.QueueConfig, l } } -func buildWriteRequest(samples []prompb.TimeSeries, metadata []prompb.MetricMetadata, buf []byte) ([]byte, int64, error) { +func buildWriteRequest(samples []prompb.TimeSeries, metadata []prompb.MetricMetadata, pBuf *proto.Buffer, buf []byte) ([]byte, int64, error) { var highest int64 for _, ts := range samples { // At the moment we only ever append a TimeSeries with a single sample or exemplar in it. @@ -1283,7 +1285,12 @@ func buildWriteRequest(samples []prompb.TimeSeries, metadata []prompb.MetricMeta Metadata: metadata, } - data, err := proto.Marshal(req) + if pBuf == nil { + pBuf = proto.NewBuffer(nil) // For convenience in tests. Not efficient. 
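	// A nil pBuf only happens on test-style call sites; steady-state callers
	// pass a long-lived per-shard buffer, and the Reset in the branch below
	// keeps its backing array, so repeated marshalling reuses memory instead
	// of allocating a fresh slice per request (the point of this commit).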
+ } else { + pBuf.Reset() + } + err := pBuf.Marshal(req) if err != nil { return nil, highest, err } @@ -1293,6 +1300,6 @@ func buildWriteRequest(samples []prompb.TimeSeries, metadata []prompb.MetricMeta if buf != nil { buf = buf[0:cap(buf)] } - compressed := snappy.Encode(buf, data) + compressed := snappy.Encode(buf, pBuf.Bytes()) return compressed, highest, nil } diff --git a/storage/remote/write_handler_test.go b/storage/remote/write_handler_test.go index d4ba5bfd8b..b4930f00b5 100644 --- a/storage/remote/write_handler_test.go +++ b/storage/remote/write_handler_test.go @@ -32,7 +32,7 @@ import ( ) func TestRemoteWriteHandler(t *testing.T) { - buf, _, err := buildWriteRequest(writeRequestFixture.Timeseries, nil, nil) + buf, _, err := buildWriteRequest(writeRequestFixture.Timeseries, nil, nil, nil) require.NoError(t, err) req, err := http.NewRequest("", "", bytes.NewReader(buf)) @@ -68,7 +68,7 @@ func TestOutOfOrderSample(t *testing.T) { buf, _, err := buildWriteRequest([]prompb.TimeSeries{{ Labels: []prompb.Label{{Name: "__name__", Value: "test_metric"}}, Samples: []prompb.Sample{{Value: 1, Timestamp: 0}}, - }}, nil, nil) + }}, nil, nil, nil) require.NoError(t, err) req, err := http.NewRequest("", "", bytes.NewReader(buf)) @@ -93,7 +93,7 @@ func TestOutOfOrderExemplar(t *testing.T) { buf, _, err := buildWriteRequest([]prompb.TimeSeries{{ Labels: []prompb.Label{{Name: "__name__", Value: "test_metric"}}, Exemplars: []prompb.Exemplar{{Labels: []prompb.Label{{Name: "foo", Value: "bar"}}, Value: 1, Timestamp: 0}}, - }}, nil, nil) + }}, nil, nil, nil) require.NoError(t, err) req, err := http.NewRequest("", "", bytes.NewReader(buf)) @@ -113,7 +113,7 @@ func TestOutOfOrderExemplar(t *testing.T) { } func TestCommitErr(t *testing.T) { - buf, _, err := buildWriteRequest(writeRequestFixture.Timeseries, nil, nil) + buf, _, err := buildWriteRequest(writeRequestFixture.Timeseries, nil, nil, nil) require.NoError(t, err) req, err := http.NewRequest("", "", bytes.NewReader(buf)) From d42be7be761ddef2aa8028aebdd7028a2deb6baf Mon Sep 17 00:00:00 2001 From: lzhfromustc <43191155+lzhfromustc@users.noreply.github.com> Date: Fri, 29 Oct 2021 15:44:32 -0700 Subject: [PATCH 036/161] test:Fix two potential goroutine leaks (#8964) Signed-off-by: lzhfromustc --- discovery/manager_test.go | 6 +++++- storage/remote/queue_manager_test.go | 1 + 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/discovery/manager_test.go b/discovery/manager_test.go index 2caae0080c..657130e5e9 100644 --- a/discovery/manager_test.go +++ b/discovery/manager_test.go @@ -1371,7 +1371,11 @@ func (tp mockdiscoveryProvider) Run(ctx context.Context, upCh chan<- []*targetgr for i := range u.targetGroups { tgs[i] = &u.targetGroups[i] } - upCh <- tgs + select { + case <-ctx.Done(): + return + case upCh <- tgs: + } } <-ctx.Done() } diff --git a/storage/remote/queue_manager_test.go b/storage/remote/queue_manager_test.go index ccca000dfe..f56b6d90c6 100644 --- a/storage/remote/queue_manager_test.go +++ b/storage/remote/queue_manager_test.go @@ -408,6 +408,7 @@ func TestReleaseNoninternedString(t *testing.T) { c := NewTestWriteClient() m := NewQueueManager(metrics, nil, nil, nil, "", newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, nil, nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false) m.Start() + defer m.Stop() for i := 1; i < 1000; i++ { m.StoreSeries([]record.RefSeries{ From 9da538210330bd30572a40b1dc263c7c5cebbfae Mon Sep 17 00:00:00 2001 From: lzhfromustc 
<43191155+lzhfromustc@users.noreply.github.com> Date: Fri, 29 Oct 2021 16:39:02 -0700 Subject: [PATCH 037/161] storage/remote: Prevent two goroutines from endless loop (#8967) Signed-off-by: lzhfromustc --- storage/remote/storage.go | 6 ++++-- storage/remote/write.go | 12 ++++++++++-- 2 files changed, 14 insertions(+), 4 deletions(-) diff --git a/storage/remote/storage.go b/storage/remote/storage.go index df20af7134..cebbffdd1a 100644 --- a/storage/remote/storage.go +++ b/storage/remote/storage.go @@ -51,7 +51,7 @@ type startTimeCallback func() (int64, error) // Storage represents all the remote read and write endpoints. It implements // storage.Storage. type Storage struct { - logger log.Logger + logger *logging.Deduper mtx sync.Mutex rws *WriteStorage @@ -66,9 +66,10 @@ func NewStorage(l log.Logger, reg prometheus.Registerer, stCallback startTimeCal if l == nil { l = log.NewNopLogger() } + logger := logging.Dedupe(l, 1*time.Minute) s := &Storage{ - logger: logging.Dedupe(l, 1*time.Minute), + logger: logger, localStartTimeCallback: stCallback, } s.rws = NewWriteStorage(s.logger, reg, walDir, flushDeadline, sm) @@ -186,6 +187,7 @@ func (s *Storage) LowestSentTimestamp() int64 { // Close the background processing of the storage queues. func (s *Storage) Close() error { + s.logger.Stop() s.mtx.Lock() defer s.mtx.Unlock() return s.rws.Close() diff --git a/storage/remote/write.go b/storage/remote/write.go index b56d258d7b..cbbfaf52e0 100644 --- a/storage/remote/write.go +++ b/storage/remote/write.go @@ -61,6 +61,7 @@ type WriteStorage struct { flushDeadline time.Duration interner *pool scraper ReadyScrapeManager + quit chan struct{} // For timestampTracker. highestTimestamp *maxTimestamp @@ -82,6 +83,7 @@ func NewWriteStorage(logger log.Logger, reg prometheus.Registerer, walDir string walDir: walDir, interner: newPool(), scraper: sm, + quit: make(chan struct{}), highestTimestamp: &maxTimestamp{ Gauge: prometheus.NewGauge(prometheus.GaugeOpts{ Namespace: namespace, @@ -101,8 +103,13 @@ func NewWriteStorage(logger log.Logger, reg prometheus.Registerer, walDir string func (rws *WriteStorage) run() { ticker := time.NewTicker(shardUpdateDuration) defer ticker.Stop() - for range ticker.C { - rws.samplesIn.tick() + for { + select { + case <-ticker.C: + rws.samplesIn.tick() + case <-rws.quit: + return + } } } @@ -235,6 +242,7 @@ func (rws *WriteStorage) Close() error { for _, q := range rws.queues { q.Stop() } + close(rws.quit) return nil } From be2599c853840eb3a86500e2682b8c1f392d4e3d Mon Sep 17 00:00:00 2001 From: Arthur Silva Sens Date: Fri, 29 Oct 2021 20:41:40 -0300 Subject: [PATCH 038/161] config: Make remote-write required for Agent mode (#9618) * config: Make remote-write required for Agent mode Signed-off-by: ArthurSens --- cmd/prometheus/main.go | 4 +-- cmd/prometheus/main_test.go | 23 ++++++++++++- cmd/promtool/main.go | 11 ++++--- cmd/promtool/main_test.go | 2 +- config/config.go | 21 +++++++++++- config/config_test.go | 36 ++++++++++----------- documentation/examples/prometheus-agent.yml | 22 +++++++++++++ 7 files changed, 91 insertions(+), 28 deletions(-) create mode 100644 documentation/examples/prometheus-agent.yml diff --git a/cmd/prometheus/main.go b/cmd/prometheus/main.go index e69e9f2dbd..d78e1420a8 100644 --- a/cmd/prometheus/main.go +++ b/cmd/prometheus/main.go @@ -452,7 +452,7 @@ func main() { // Throw error for invalid config before starting other components. 
var cfgFile *config.Config - if cfgFile, err = config.LoadFile(cfg.configFile, false, log.NewNopLogger()); err != nil { + if cfgFile, err = config.LoadFile(cfg.configFile, agentMode, false, log.NewNopLogger()); err != nil { level.Error(logger).Log("msg", fmt.Sprintf("Error loading config (--config.file=%s)", cfg.configFile), "err", err) os.Exit(2) } @@ -1162,7 +1162,7 @@ func reloadConfig(filename string, expandExternalLabels bool, enableExemplarStor } }() - conf, err := config.LoadFile(filename, expandExternalLabels, logger) + conf, err := config.LoadFile(filename, agentMode, expandExternalLabels, logger) if err != nil { return errors.Wrapf(err, "couldn't load configuration (--config.file=%q)", filename) } diff --git a/cmd/prometheus/main_test.go b/cmd/prometheus/main_test.go index 6f9a1d566b..6f4d58b94f 100644 --- a/cmd/prometheus/main_test.go +++ b/cmd/prometheus/main_test.go @@ -37,6 +37,7 @@ import ( var promPath = os.Args[0] var promConfig = filepath.Join("..", "..", "documentation", "examples", "prometheus.yml") +var agentConfig = filepath.Join("..", "..", "documentation", "examples", "prometheus-agent.yml") var promData = filepath.Join(os.TempDir(), "data") func TestMain(m *testing.M) { @@ -349,7 +350,7 @@ func getCurrentGaugeValuesFor(t *testing.T, reg prometheus.Gatherer, metricNames } func TestAgentSuccessfulStartup(t *testing.T) { - prom := exec.Command(promPath, "-test.main", "--agent", "--config.file="+promConfig) + prom := exec.Command(promPath, "-test.main", "--agent", "--config.file="+agentConfig) err := prom.Start() require.NoError(t, err) @@ -367,3 +368,23 @@ func TestAgentSuccessfulStartup(t *testing.T) { } require.Equal(t, expectedExitStatus, actualExitStatus) } + +func TestAgentStartupWithInvalidConfig(t *testing.T) { + prom := exec.Command(promPath, "-test.main", "--agent", "--config.file="+promConfig) + err := prom.Start() + require.NoError(t, err) + + expectedExitStatus := 2 + actualExitStatus := 0 + + done := make(chan error, 1) + go func() { done <- prom.Wait() }() + select { + case err := <-done: + t.Logf("prometheus agent should not be running: %v", err) + actualExitStatus = prom.ProcessState.ExitCode() + case <-time.After(5 * time.Second): + prom.Process.Kill() + } + require.Equal(t, expectedExitStatus, actualExitStatus) +} diff --git a/cmd/promtool/main.go b/cmd/promtool/main.go index 9b436ff712..8763e78b61 100644 --- a/cmd/promtool/main.go +++ b/cmd/promtool/main.go @@ -82,6 +82,7 @@ func main() { ).Required().ExistingFiles() checkMetricsCmd := checkCmd.Command("metrics", checkMetricsUsage) + agentMode := checkConfigCmd.Flag("agent", "Check config file for Prometheus in Agent mode.").Bool() queryCmd := app.Command("query", "Run query against a Prometheus server.") queryCmdFmt := queryCmd.Flag("format", "Output format of the query.").Short('o').Default("promql").Enum("promql", "json") @@ -202,7 +203,7 @@ func main() { switch parsedCmd { case checkConfigCmd.FullCommand(): - os.Exit(CheckConfig(*configFiles...)) + os.Exit(CheckConfig(*agentMode, *configFiles...)) case checkWebConfigCmd.FullCommand(): os.Exit(CheckWebConfig(*webConfigFiles...)) @@ -258,11 +259,11 @@ func main() { } // CheckConfig validates configuration files. 
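With the --agent flag added above, agent-mode configuration files can be validated from the command line, for example:

	promtool check config --agent documentation/examples/prometheus-agent.yml

Conversely, checking a server-style config such as documentation/examples/prometheus.yml with --agent fails, because agent mode requires a remote_write section and rejects rule_files, alerting and remote_read (the validation added to config.LoadFile below).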
-func CheckConfig(files ...string) int { +func CheckConfig(agentMode bool, files ...string) int { failed := false for _, f := range files { - ruleFiles, err := checkConfig(f) + ruleFiles, err := checkConfig(agentMode, f) if err != nil { fmt.Fprintln(os.Stderr, " FAILED:", err) failed = true @@ -317,10 +318,10 @@ func checkFileExists(fn string) error { return err } -func checkConfig(filename string) ([]string, error) { +func checkConfig(agentMode bool, filename string) ([]string, error) { fmt.Println("Checking", filename) - cfg, err := config.LoadFile(filename, false, log.NewNopLogger()) + cfg, err := config.LoadFile(filename, agentMode, false, log.NewNopLogger()) if err != nil { return nil, err } diff --git a/cmd/promtool/main_test.go b/cmd/promtool/main_test.go index 8ca078cc57..138548fc75 100644 --- a/cmd/promtool/main_test.go +++ b/cmd/promtool/main_test.go @@ -193,7 +193,7 @@ func TestCheckTargetConfig(t *testing.T) { } for _, test := range cases { t.Run(test.name, func(t *testing.T) { - _, err := checkConfig("testdata/" + test.file) + _, err := checkConfig(false, "testdata/"+test.file) if test.err != "" { require.Equalf(t, test.err, err.Error(), "Expected error %q, got %q", test.err, err.Error()) return diff --git a/config/config.go b/config/config.go index dc2ed19a25..9729642ae0 100644 --- a/config/config.go +++ b/config/config.go @@ -99,7 +99,7 @@ func Load(s string, expandExternalLabels bool, logger log.Logger) (*Config, erro } // LoadFile parses the given YAML file into a Config. -func LoadFile(filename string, expandExternalLabels bool, logger log.Logger) (*Config, error) { +func LoadFile(filename string, agentMode bool, expandExternalLabels bool, logger log.Logger) (*Config, error) { content, err := ioutil.ReadFile(filename) if err != nil { return nil, err @@ -108,6 +108,25 @@ func LoadFile(filename string, expandExternalLabels bool, logger log.Logger) (*C if err != nil { return nil, errors.Wrapf(err, "parsing YAML file %s", filename) } + + if agentMode { + if len(cfg.RemoteWriteConfigs) == 0 { + return nil, errors.New("at least one remote_write target must be specified in agent mode") + } + + if len(cfg.AlertingConfig.AlertmanagerConfigs) > 0 || len(cfg.AlertingConfig.AlertRelabelConfigs) > 0 { + return nil, errors.New("field alerting is not allowed in agent mode") + } + + if len(cfg.RuleFiles) > 0 { + return nil, errors.New("field rule_files is not allowed in agent mode") + } + + if len(cfg.RemoteReadConfigs) > 0 { + return nil, errors.New("field remote_read is not allowed in agent mode") + } + } + cfg.SetDirectory(filepath.Dir(filename)) return cfg, nil } diff --git a/config/config_test.go b/config/config_test.go index a2efaa6768..f26e4f3e66 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -986,7 +986,7 @@ var expectedConf = &Config{ } func TestYAMLRoundtrip(t *testing.T) { - want, err := LoadFile("testdata/roundtrip.good.yml", false, log.NewNopLogger()) + want, err := LoadFile("testdata/roundtrip.good.yml", false, false, log.NewNopLogger()) require.NoError(t, err) out, err := yaml.Marshal(want) @@ -999,7 +999,7 @@ func TestYAMLRoundtrip(t *testing.T) { } func TestRemoteWriteRetryOnRateLimit(t *testing.T) { - want, err := LoadFile("testdata/remote_write_retry_on_rate_limit.good.yml", false, log.NewNopLogger()) + want, err := LoadFile("testdata/remote_write_retry_on_rate_limit.good.yml", false, false, log.NewNopLogger()) require.NoError(t, err) out, err := yaml.Marshal(want) @@ -1015,16 +1015,16 @@ func TestRemoteWriteRetryOnRateLimit(t *testing.T) { func 
TestLoadConfig(t *testing.T) {
 // Parse a valid file that sets a global scrape timeout. This tests whether parsing
 // an overwritten default field in the global config permanently changes the default.
- _, err := LoadFile("testdata/global_timeout.good.yml", false, log.NewNopLogger())
+ _, err := LoadFile("testdata/global_timeout.good.yml", false, false, log.NewNopLogger())
 require.NoError(t, err)

- c, err := LoadFile("testdata/conf.good.yml", false, log.NewNopLogger())
+ c, err := LoadFile("testdata/conf.good.yml", false, false, log.NewNopLogger())
 require.NoError(t, err)
 require.Equal(t, expectedConf, c)
 }

 func TestScrapeIntervalLarger(t *testing.T) {
- c, err := LoadFile("testdata/scrape_interval_larger.good.yml", false, log.NewNopLogger())
+ c, err := LoadFile("testdata/scrape_interval_larger.good.yml", false, false, log.NewNopLogger())
 require.NoError(t, err)
 require.Equal(t, 1, len(c.ScrapeConfigs))
 for _, sc := range c.ScrapeConfigs {
@@ -1034,7 +1034,7 @@

 // YAML marshaling must not reveal authentication credentials.
 func TestElideSecrets(t *testing.T) {
- c, err := LoadFile("testdata/conf.good.yml", false, log.NewNopLogger())
+ c, err := LoadFile("testdata/conf.good.yml", false, false, log.NewNopLogger())
 require.NoError(t, err)

 secretRe := regexp.MustCompile(`\\u003csecret\\u003e|<secret>`)
@@ -1051,31 +1051,31 @@ func TestElideSecrets(t *testing.T) {

 func TestLoadConfigRuleFilesAbsolutePath(t *testing.T) {
 // Parse a valid file that sets a rule files with an absolute path
- c, err := LoadFile(ruleFilesConfigFile, false, log.NewNopLogger())
+ c, err := LoadFile(ruleFilesConfigFile, false, false, log.NewNopLogger())
 require.NoError(t, err)
 require.Equal(t, ruleFilesExpectedConf, c)
 }

 func TestKubernetesEmptyAPIServer(t *testing.T) {
- _, err := LoadFile("testdata/kubernetes_empty_apiserver.good.yml", false, log.NewNopLogger())
+ _, err := LoadFile("testdata/kubernetes_empty_apiserver.good.yml", false, false, log.NewNopLogger())
 require.NoError(t, err)
 }

 func TestKubernetesWithKubeConfig(t *testing.T) {
- _, err := LoadFile("testdata/kubernetes_kubeconfig_without_apiserver.good.yml", false, log.NewNopLogger())
+ _, err := LoadFile("testdata/kubernetes_kubeconfig_without_apiserver.good.yml", false, false, log.NewNopLogger())
 require.NoError(t, err)
 }

 func TestKubernetesSelectors(t *testing.T) {
- _, err := LoadFile("testdata/kubernetes_selectors_endpoints.good.yml", false, log.NewNopLogger())
+ _, err := LoadFile("testdata/kubernetes_selectors_endpoints.good.yml", false, false, log.NewNopLogger())
 require.NoError(t, err)
- _, err = LoadFile("testdata/kubernetes_selectors_node.good.yml", false, log.NewNopLogger())
+ _, err = LoadFile("testdata/kubernetes_selectors_node.good.yml", false, false, log.NewNopLogger())
 require.NoError(t, err)
- _, err = LoadFile("testdata/kubernetes_selectors_ingress.good.yml", false, log.NewNopLogger())
+ _, err = LoadFile("testdata/kubernetes_selectors_ingress.good.yml", false, false, log.NewNopLogger())
 require.NoError(t, err)
- _, err = LoadFile("testdata/kubernetes_selectors_pod.good.yml", false, log.NewNopLogger())
+ _, err = LoadFile("testdata/kubernetes_selectors_pod.good.yml", false, false, log.NewNopLogger())
 require.NoError(t, err)
- _, err = LoadFile("testdata/kubernetes_selectors_service.good.yml", false, log.NewNopLogger())
+ _, err = LoadFile("testdata/kubernetes_selectors_service.good.yml", false, false, log.NewNopLogger())
 require.NoError(t, err)
 }

@@ -1381,7 +1381,7 @@ var expectedErrors = []struct {
func TestBadConfigs(t *testing.T) {
 for _, ee := range expectedErrors {
- _, err := LoadFile("testdata/"+ee.filename, false, log.NewNopLogger())
+ _, err := LoadFile("testdata/"+ee.filename, false, false, log.NewNopLogger())
 require.Error(t, err, "%s", ee.filename)
 require.Contains(t, err.Error(), ee.errMsg,
 "Expected error for %s to contain %q but got: %s", ee.filename, ee.errMsg, err)
@@ -1415,20 +1415,20 @@ func TestExpandExternalLabels(t *testing.T) {
 // Cleanup ant TEST env variable that could exist on the system.
 os.Setenv("TEST", "")

- c, err := LoadFile("testdata/external_labels.good.yml", false, log.NewNopLogger())
+ c, err := LoadFile("testdata/external_labels.good.yml", false, false, log.NewNopLogger())
 require.NoError(t, err)
 require.Equal(t, labels.Label{Name: "bar", Value: "foo"}, c.GlobalConfig.ExternalLabels[0])
 require.Equal(t, labels.Label{Name: "baz", Value: "foo${TEST}bar"}, c.GlobalConfig.ExternalLabels[1])
 require.Equal(t, labels.Label{Name: "foo", Value: "${TEST}"}, c.GlobalConfig.ExternalLabels[2])

- c, err = LoadFile("testdata/external_labels.good.yml", true, log.NewNopLogger())
+ c, err = LoadFile("testdata/external_labels.good.yml", false, true, log.NewNopLogger())
 require.NoError(t, err)
 require.Equal(t, labels.Label{Name: "bar", Value: "foo"}, c.GlobalConfig.ExternalLabels[0])
 require.Equal(t, labels.Label{Name: "baz", Value: "foobar"}, c.GlobalConfig.ExternalLabels[1])
 require.Equal(t, labels.Label{Name: "foo", Value: ""}, c.GlobalConfig.ExternalLabels[2])

 os.Setenv("TEST", "TestValue")
- c, err = LoadFile("testdata/external_labels.good.yml", true, log.NewNopLogger())
+ c, err = LoadFile("testdata/external_labels.good.yml", false, true, log.NewNopLogger())
 require.NoError(t, err)
 require.Equal(t, labels.Label{Name: "bar", Value: "foo"}, c.GlobalConfig.ExternalLabels[0])
 require.Equal(t, labels.Label{Name: "baz", Value: "fooTestValuebar"}, c.GlobalConfig.ExternalLabels[1])
diff --git a/documentation/examples/prometheus-agent.yml b/documentation/examples/prometheus-agent.yml
new file mode 100644
index 0000000000..0e51808170
--- /dev/null
+++ b/documentation/examples/prometheus-agent.yml
@@ -0,0 +1,22 @@
+# my global config
+global:
+ scrape_interval: 15s # Set the scrape interval to every 15 seconds. Default is every 1 minute.
+ evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.
+ # scrape_timeout is set to the global default (10s).
+
+# A scrape configuration containing exactly one endpoint to scrape:
+# Here it's Prometheus itself.
+scrape_configs:
+ # The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
+ - job_name: "prometheus"
+
+ # metrics_path defaults to '/metrics'
+ # scheme defaults to 'http'.
+
+ static_configs:
+ - targets: ["localhost:9090"]
+
+# When running Prometheus in Agent mode, remote-write is required.
+remote_write:
+ # Agent is able to run with an invalid remote-write URL, but, of course, will fail to push timeseries.
+ - url: "http://remote-write-url" From 0a9deb9597e08da85e55fc4b1ccb38cc6f11412d Mon Sep 17 00:00:00 2001 From: darshanime Date: Mon, 25 Oct 2021 16:32:40 +0530 Subject: [PATCH 039/161] use kahan summation for numerical stability Signed-off-by: darshanime --- promql/functions.go | 54 ++++++++++++++++++++++++++++++++-------- promql/functions_test.go | 7 ++++++ 2 files changed, 50 insertions(+), 11 deletions(-) diff --git a/promql/functions.go b/promql/functions.go index 50594503dc..9982b92c72 100644 --- a/promql/functions.go +++ b/promql/functions.go @@ -439,11 +439,14 @@ func funcMinOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode // === sum_over_time(Matrix parser.ValueTypeMatrix) Vector === func funcSumOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { return aggrOverTime(vals, enh, func(values []Point) float64 { - var sum float64 + var sum, c float64 for _, v := range values { - sum += v.V + sum, c = kahanSummationIter(v.V, sum, c) } - return sum + if math.IsInf(sum, 0) { + return sum + } + return sum + c }) } @@ -675,23 +678,52 @@ func funcTimestamp(vals []parser.Value, args parser.Expressions, enh *EvalNodeHe return enh.Out } +func kahanSummation(samples []float64) float64 { + sum, c := 0.0, 0.0 + + for _, v := range samples { + sum, c = kahanSummationIter(v, sum, c) + } + return sum + c +} + +func kahanSummationIter(v, sum, c float64) (float64, float64) { + t := sum + v + // using Neumaier improvement, swap if next term larger than sum + if math.Abs(sum) >= math.Abs(v) { + c += (sum - t) + v + } else { + c += (v - t) + sum + } + sum = t + return sum, c +} + // linearRegression performs a least-square linear regression analysis on the // provided SamplePairs. It returns the slope, and the intercept value at the // provided time. 
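For intuition, take the vector used by TestKahanSummation further down: in plain float64 arithmetic both 1.0 terms are absorbed when added to 1e100 (the nearest representable value of 1e100+1 is 1e100 itself), so a naive loop returns 0, while the compensation term c collects exactly the lost low-order parts and sum+c recovers the true 2:

	naive := 0.0
	for _, v := range []float64{1, 1e100, 1, -1e100} {
		naive += v // running value: 1, 1e100, 1e100, 0
	}
	fmt.Println(naive)                                          // 0
	fmt.Println(kahanSummation([]float64{1, 1e100, 1, -1e100})) // 2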
func linearRegression(samples []Point, interceptTime int64) (slope, intercept float64) { var ( - n float64 - sumX, sumY float64 - sumXY, sumX2 float64 + n float64 + sumX, cX float64 + sumY, cY float64 + sumXY, cXY float64 + sumX2, cX2 float64 ) for _, sample := range samples { - x := float64(sample.T-interceptTime) / 1e3 n += 1.0 - sumY += sample.V - sumX += x - sumXY += x * sample.V - sumX2 += x * x + x := float64(sample.T-interceptTime) / 1e3 + sumX, cX = kahanSummationIter(x, sumX, cX) + sumY, cY = kahanSummationIter(sample.V, sumY, cY) + sumXY, cXY = kahanSummationIter(x*sample.V, sumXY, cXY) + sumX2, cX2 = kahanSummationIter(x*x, sumX2, cX2) } + + sumX = sumX + cX + sumY = sumY + cY + sumXY = sumXY + cXY + sumX2 = sumX2 + cX2 + covXY := sumXY - sumX*sumY/n varX := sumX2 - sumX*sumX/n diff --git a/promql/functions_test.go b/promql/functions_test.go index 5707cbed31..b0a8b5db6a 100644 --- a/promql/functions_test.go +++ b/promql/functions_test.go @@ -15,6 +15,7 @@ package promql import ( "context" + "math" "testing" "time" @@ -71,3 +72,9 @@ func TestFunctionList(t *testing.T) { require.True(t, ok, "function %s exists in parser package, but not in promql package", i) } } + +func TestKahanSummation(t *testing.T) { + vals := []float64{1.0, math.Pow(10, 100), 1.0, -1 * math.Pow(10, 100)} + expected := 2.0 + require.Equal(t, expected, kahanSummation(vals)) +} From a905354da35b371605c89a74bd7a8e8d566e5970 Mon Sep 17 00:00:00 2001 From: darshanime Date: Mon, 25 Oct 2021 18:02:25 +0530 Subject: [PATCH 040/161] use kahan for avg_over_time Signed-off-by: darshanime --- promql/functions.go | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/promql/functions.go b/promql/functions.go index 9982b92c72..e43cc85abd 100644 --- a/promql/functions.go +++ b/promql/functions.go @@ -367,7 +367,7 @@ func aggrOverTime(vals []parser.Value, enh *EvalNodeHelper, aggrFn func([]Point) // === avg_over_time(Matrix parser.ValueTypeMatrix) Vector === func funcAvgOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { return aggrOverTime(vals, enh, func(values []Point) float64 { - var mean, count float64 + var mean, count, c float64 for _, v := range values { count++ if math.IsInf(mean, 0) { @@ -387,9 +387,13 @@ func funcAvgOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode continue } } - mean += v.V/count - mean/count + mean, c = kahanSummationIter(v.V/count-mean/count, mean, c) } - return mean + + if math.IsInf(mean, 0) { + return mean + } + return mean + c }) } From 694b872deea7f5f385933714618610b8491d6f4e Mon Sep 17 00:00:00 2001 From: darshanime Date: Sat, 30 Oct 2021 19:08:23 +0530 Subject: [PATCH 041/161] address stylistic nits Signed-off-by: darshanime --- promql/functions.go | 33 ++++++++++++++++----------------- 1 file changed, 16 insertions(+), 17 deletions(-) diff --git a/promql/functions.go b/promql/functions.go index e43cc85abd..e737992e46 100644 --- a/promql/functions.go +++ b/promql/functions.go @@ -387,7 +387,7 @@ func funcAvgOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode continue } } - mean, c = kahanSummationIter(v.V/count-mean/count, mean, c) + mean, c = kahanSumInc(v.V/count-mean/count, mean, c) } if math.IsInf(mean, 0) { @@ -445,7 +445,7 @@ func funcSumOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode return aggrOverTime(vals, enh, func(values []Point) float64 { var sum, c float64 for _, v := range values { - sum, c = kahanSummationIter(v.V, sum, c) + sum, c = kahanSumInc(v.V, sum, c) } 
if math.IsInf(sum, 0) { return sum @@ -682,25 +682,24 @@ func funcTimestamp(vals []parser.Value, args parser.Expressions, enh *EvalNodeHe return enh.Out } -func kahanSummation(samples []float64) float64 { - sum, c := 0.0, 0.0 +func kahanSum(samples []float64) float64 { + var sum, c float64 for _, v := range samples { - sum, c = kahanSummationIter(v, sum, c) + sum, c = kahanSumInc(v, sum, c) } return sum + c } -func kahanSummationIter(v, sum, c float64) (float64, float64) { - t := sum + v - // using Neumaier improvement, swap if next term larger than sum - if math.Abs(sum) >= math.Abs(v) { - c += (sum - t) + v +func kahanSumInc(inc, sum, c float64) (newSum, newC float64) { + t := sum + inc + // Using Neumaier improvement, swap if next term larger than sum. + if math.Abs(sum) >= math.Abs(inc) { + c += (sum - t) + inc } else { - c += (v - t) + sum + c += (inc - t) + sum } - sum = t - return sum, c + return t, c } // linearRegression performs a least-square linear regression analysis on the @@ -717,10 +716,10 @@ func linearRegression(samples []Point, interceptTime int64) (slope, intercept fl for _, sample := range samples { n += 1.0 x := float64(sample.T-interceptTime) / 1e3 - sumX, cX = kahanSummationIter(x, sumX, cX) - sumY, cY = kahanSummationIter(sample.V, sumY, cY) - sumXY, cXY = kahanSummationIter(x*sample.V, sumXY, cXY) - sumX2, cX2 = kahanSummationIter(x*x, sumX2, cX2) + sumX, cX = kahanSumInc(x, sumX, cX) + sumY, cY = kahanSumInc(sample.V, sumY, cY) + sumXY, cXY = kahanSumInc(x*sample.V, sumXY, cXY) + sumX2, cX2 = kahanSumInc(x*x, sumX2, cX2) } sumX = sumX + cX From 42d786f1acb1fe315a194478bea69acdcd4aca56 Mon Sep 17 00:00:00 2001 From: darshanime Date: Sat, 30 Oct 2021 19:41:36 +0530 Subject: [PATCH 042/161] use kahan summation for aggregation functions Signed-off-by: darshanime --- promql/functions.go | 24 ++++++++++++++---------- promql/functions_test.go | 4 ++-- 2 files changed, 16 insertions(+), 12 deletions(-) diff --git a/promql/functions.go b/promql/functions.go index e737992e46..49ff09678f 100644 --- a/promql/functions.go +++ b/promql/functions.go @@ -471,28 +471,32 @@ func funcQuantileOverTime(vals []parser.Value, args parser.Expressions, enh *Eva // === stddev_over_time(Matrix parser.ValueTypeMatrix) Vector === func funcStddevOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { return aggrOverTime(vals, enh, func(values []Point) float64 { - var aux, count, mean float64 + var count float64 + var mean, cMean float64 + var aux, cAux float64 for _, v := range values { count++ - delta := v.V - mean - mean += delta / count - aux += delta * (v.V - mean) + delta := v.V - (mean + cMean) + mean, cMean = kahanSumInc(delta/count, mean, cMean) + aux, cAux = kahanSumInc(delta*(v.V-(mean+cMean)), aux, cAux) } - return math.Sqrt(aux / count) + return math.Sqrt((aux + cAux) / count) }) } // === stdvar_over_time(Matrix parser.ValueTypeMatrix) Vector === func funcStdvarOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { return aggrOverTime(vals, enh, func(values []Point) float64 { - var aux, count, mean float64 + var count float64 + var mean, cMean float64 + var aux, cAux float64 for _, v := range values { count++ - delta := v.V - mean - mean += delta / count - aux += delta * (v.V - mean) + delta := v.V - (mean + cMean) + mean, cMean = kahanSumInc(delta/count, mean, cMean) + aux, cAux = kahanSumInc(delta*(v.V-(mean+cMean)), aux, cAux) } - return aux / count + return (aux + cAux) / count }) } diff --git a/promql/functions_test.go 
b/promql/functions_test.go index b0a8b5db6a..19ee105da7 100644 --- a/promql/functions_test.go +++ b/promql/functions_test.go @@ -73,8 +73,8 @@ func TestFunctionList(t *testing.T) { } } -func TestKahanSummation(t *testing.T) { +func TestKahanSum(t *testing.T) { vals := []float64{1.0, math.Pow(10, 100), 1.0, -1 * math.Pow(10, 100)} expected := 2.0 - require.Equal(t, expected, kahanSummation(vals)) + require.Equal(t, expected, kahanSum(vals)) } From 64d9b419980479536b237244d87e4d9c5cc816cf Mon Sep 17 00:00:00 2001 From: Praveen Ghuge Date: Mon, 1 Nov 2021 12:28:18 +0530 Subject: [PATCH 043/161] Use testing.T.TempDir() instead of ioutil.TempDir() in tsdb/wal unit tests (#9602) Signed-off-by: Praveen Ghuge --- tsdb/wal/checkpoint_test.go | 26 ++++---------------- tsdb/wal/reader_test.go | 24 +++--------------- tsdb/wal/wal_test.go | 44 ++++++--------------------------- tsdb/wal/watcher_test.go | 49 +++++++++---------------------------- 4 files changed, 29 insertions(+), 114 deletions(-) diff --git a/tsdb/wal/checkpoint_test.go b/tsdb/wal/checkpoint_test.go index f84fb4dd89..9df533df1f 100644 --- a/tsdb/wal/checkpoint_test.go +++ b/tsdb/wal/checkpoint_test.go @@ -31,13 +31,9 @@ import ( ) func TestLastCheckpoint(t *testing.T) { - dir, err := ioutil.TempDir("", "test_checkpoint") - require.NoError(t, err) - defer func() { - require.NoError(t, os.RemoveAll(dir)) - }() + dir := t.TempDir() - _, _, err = LastCheckpoint(dir) + _, _, err := LastCheckpoint(dir) require.Equal(t, record.ErrNotFound, err) require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.0000"), 0777)) @@ -78,11 +74,7 @@ func TestLastCheckpoint(t *testing.T) { } func TestDeleteCheckpoints(t *testing.T) { - dir, err := ioutil.TempDir("", "test_checkpoint") - require.NoError(t, err) - defer func() { - require.NoError(t, os.RemoveAll(dir)) - }() + dir := t.TempDir() require.NoError(t, DeleteCheckpoints(dir, 0)) @@ -119,11 +111,7 @@ func TestDeleteCheckpoints(t *testing.T) { func TestCheckpoint(t *testing.T) { for _, compress := range []bool{false, true} { t.Run(fmt.Sprintf("compress=%t", compress), func(t *testing.T) { - dir, err := ioutil.TempDir("", "test_checkpoint") - require.NoError(t, err) - defer func() { - require.NoError(t, os.RemoveAll(dir)) - }() + dir := t.TempDir() var enc record.Encoder // Create a dummy segment to bump the initial number. @@ -240,11 +228,7 @@ func TestCheckpoint(t *testing.T) { func TestCheckpointNoTmpFolderAfterError(t *testing.T) { // Create a new wal with invalid data. 
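The kahanSum/kahanSumInc helpers renamed and reused in the two patches above implement Neumaier's variant of Kahan compensated summation: a running error term recovers the low-order bits that plain float64 addition discards. A minimal standalone sketch, not part of any patch here, using the same values as TestKahanSum:

package main

import (
	"fmt"
	"math"
)

// kahanSumInc is the helper from the patch above: it folds one increment
// into a running (sum, compensation) pair. Neumaier's refinement is the
// magnitude test: whichever operand is smaller has its low-order bits
// captured in c, even when the increment dwarfs the running sum.
func kahanSumInc(inc, sum, c float64) (newSum, newC float64) {
	t := sum + inc
	if math.Abs(sum) >= math.Abs(inc) {
		c += (sum - t) + inc
	} else {
		c += (inc - t) + sum
	}
	return t, c
}

func main() {
	// The same values TestKahanSum uses: the exact sum is 2.
	vals := []float64{1.0, math.Pow(10, 100), 1.0, -math.Pow(10, 100)}

	naive := 0.0
	var sum, c float64
	for _, v := range vals {
		naive += v
		sum, c = kahanSumInc(v, sum, c)
	}

	fmt.Println(naive)   // 0: each 1.0 is absorbed by the 1e100 addend.
	fmt.Println(sum + c) // 2: the compensation term recovers both.
}

Classic Kahan summation, which always assumes the running sum dominates, also returns 0 on this input; the swap on the magnitude test is what makes the Neumaier form robust.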
- dir, err := ioutil.TempDir("", "test_checkpoint") - require.NoError(t, err) - defer func() { - require.NoError(t, os.RemoveAll(dir)) - }() + dir := t.TempDir() w, err := NewSize(nil, nil, dir, 64*1024, false) require.NoError(t, err) var enc record.Encoder diff --git a/tsdb/wal/reader_test.go b/tsdb/wal/reader_test.go index 8d53612c0c..0513b6782a 100644 --- a/tsdb/wal/reader_test.go +++ b/tsdb/wal/reader_test.go @@ -310,11 +310,7 @@ func TestReaderFuzz(t *testing.T) { for name, fn := range readerConstructors { for _, compress := range []bool{false, true} { t.Run(fmt.Sprintf("%s,compress=%t", name, compress), func(t *testing.T) { - dir, err := ioutil.TempDir("", "wal_fuzz_live") - require.NoError(t, err) - defer func() { - require.NoError(t, os.RemoveAll(dir)) - }() + dir := t.TempDir() w, err := NewSize(nil, nil, dir, 128*pageSize, compress) require.NoError(t, err) @@ -347,11 +343,7 @@ func TestReaderFuzz_Live(t *testing.T) { logger := testutil.NewLogger(t) for _, compress := range []bool{false, true} { t.Run(fmt.Sprintf("compress=%t", compress), func(t *testing.T) { - dir, err := ioutil.TempDir("", "wal_fuzz_live") - require.NoError(t, err) - defer func() { - require.NoError(t, os.RemoveAll(dir)) - }() + dir := t.TempDir() w, err := NewSize(nil, nil, dir, 128*pageSize, compress) require.NoError(t, err) @@ -432,11 +424,7 @@ func TestLiveReaderCorrupt_ShortFile(t *testing.T) { // Write a corrupt WAL segment, there is one record of pageSize in length, // but the segment is only half written. logger := testutil.NewLogger(t) - dir, err := ioutil.TempDir("", "wal_live_corrupt") - require.NoError(t, err) - defer func() { - require.NoError(t, os.RemoveAll(dir)) - }() + dir := t.TempDir() w, err := NewSize(nil, nil, dir, pageSize, false) require.NoError(t, err) @@ -476,11 +464,7 @@ func TestLiveReaderCorrupt_ShortFile(t *testing.T) { func TestLiveReaderCorrupt_RecordTooLongAndShort(t *testing.T) { // Write a corrupt WAL segment, when record len > page size. logger := testutil.NewLogger(t) - dir, err := ioutil.TempDir("", "wal_live_corrupt") - require.NoError(t, err) - defer func() { - require.NoError(t, os.RemoveAll(dir)) - }() + dir := t.TempDir() w, err := NewSize(nil, nil, dir, pageSize*2, false) require.NoError(t, err) diff --git a/tsdb/wal/wal_test.go b/tsdb/wal/wal_test.go index b12c0d60c8..d73a94fbbf 100644 --- a/tsdb/wal/wal_test.go +++ b/tsdb/wal/wal_test.go @@ -18,7 +18,6 @@ import ( "bytes" "fmt" "io" - "io/ioutil" "math/rand" "os" "path/filepath" @@ -119,11 +118,7 @@ func TestWALRepair_ReadingError(t *testing.T) { }, } { t.Run(name, func(t *testing.T) { - dir, err := ioutil.TempDir("", "wal_repair") - require.NoError(t, err) - defer func() { - require.NoError(t, os.RemoveAll(dir)) - }() + dir := t.TempDir() // We create 3 segments with 3 records each and // then corrupt a given record in a given segment. @@ -217,11 +212,7 @@ func TestWALRepair_ReadingError(t *testing.T) { // ensures that an error during reading that segment are correctly repaired before // moving to write more records to the WAL. func TestCorruptAndCarryOn(t *testing.T) { - dir, err := ioutil.TempDir("", "wal_repair") - require.NoError(t, err) - defer func() { - require.NoError(t, os.RemoveAll(dir)) - }() + dir := t.TempDir() var ( logger = testutil.NewLogger(t) @@ -345,11 +336,7 @@ func TestCorruptAndCarryOn(t *testing.T) { // TestClose ensures that calling Close more than once doesn't panic and doesn't block. 
func TestClose(t *testing.T) { - dir, err := ioutil.TempDir("", "wal_repair") - require.NoError(t, err) - defer func() { - require.NoError(t, os.RemoveAll(dir)) - }() + dir := t.TempDir() w, err := NewSize(nil, nil, dir, pageSize, false) require.NoError(t, err) require.NoError(t, w.Close()) @@ -362,11 +349,7 @@ func TestSegmentMetric(t *testing.T) { recordSize = (pageSize / 2) - recordHeaderSize ) - dir, err := ioutil.TempDir("", "segment_metric") - require.NoError(t, err) - defer func() { - require.NoError(t, os.RemoveAll(dir)) - }() + dir := t.TempDir() w, err := NewSize(nil, nil, dir, segmentSize, false) require.NoError(t, err) @@ -393,8 +376,7 @@ func TestCompression(t *testing.T) { records = 100 ) - dirPath, err := ioutil.TempDir("", fmt.Sprintf("TestCompression_%t", compressed)) - require.NoError(t, err) + dirPath := t.TempDir() w, err := NewSize(nil, nil, dirPath, segmentSize, compressed) require.NoError(t, err) @@ -454,9 +436,7 @@ func TestLogPartialWrite(t *testing.T) { for testName, testData := range tests { t.Run(testName, func(t *testing.T) { - dirPath, err := ioutil.TempDir("", "logpartialwrite") - require.NoError(t, err) - defer func() { require.NoError(t, os.RemoveAll(dirPath)) }() + dirPath := t.TempDir() w, err := NewSize(nil, nil, dirPath, segmentSize, false) require.NoError(t, err) @@ -527,11 +507,7 @@ func (f *faultySegmentFile) Write(p []byte) (int, error) { func BenchmarkWAL_LogBatched(b *testing.B) { for _, compress := range []bool{true, false} { b.Run(fmt.Sprintf("compress=%t", compress), func(b *testing.B) { - dir, err := ioutil.TempDir("", "bench_logbatch") - require.NoError(b, err) - defer func() { - require.NoError(b, os.RemoveAll(dir)) - }() + dir := b.TempDir() w, err := New(nil, nil, dir, compress) require.NoError(b, err) @@ -561,11 +537,7 @@ func BenchmarkWAL_LogBatched(b *testing.B) { func BenchmarkWAL_Log(b *testing.B) { for _, compress := range []bool{true, false} { b.Run(fmt.Sprintf("compress=%t", compress), func(b *testing.B) { - dir, err := ioutil.TempDir("", "bench_logsingle") - require.NoError(b, err) - defer func() { - require.NoError(b, os.RemoveAll(dir)) - }() + dir := b.TempDir() w, err := New(nil, nil, dir, compress) require.NoError(b, err) diff --git a/tsdb/wal/watcher_test.go b/tsdb/wal/watcher_test.go index 3ff7d1ad22..a1786f823d 100644 --- a/tsdb/wal/watcher_test.go +++ b/tsdb/wal/watcher_test.go @@ -14,7 +14,6 @@ package wal import ( "fmt" - "io/ioutil" "math/rand" "os" "path" @@ -110,14 +109,10 @@ func TestTailSamples(t *testing.T) { t.Run(fmt.Sprintf("compress=%t", compress), func(t *testing.T) { now := time.Now() - dir, err := ioutil.TempDir("", "readCheckpoint") - require.NoError(t, err) - defer func() { - require.NoError(t, os.RemoveAll(dir)) - }() + dir := t.TempDir() wdir := path.Join(dir, "wal") - err = os.Mkdir(wdir, 0777) + err := os.Mkdir(wdir, 0777) require.NoError(t, err) enc := record.Encoder{} @@ -204,13 +199,9 @@ func TestReadToEndNoCheckpoint(t *testing.T) { for _, compress := range []bool{false, true} { t.Run(fmt.Sprintf("compress=%t", compress), func(t *testing.T) { - dir, err := ioutil.TempDir("", "readToEnd_noCheckpoint") - require.NoError(t, err) - defer func() { - require.NoError(t, os.RemoveAll(dir)) - }() + dir := t.TempDir() wdir := path.Join(dir, "wal") - err = os.Mkdir(wdir, 0777) + err := os.Mkdir(wdir, 0777) require.NoError(t, err) w, err := NewSize(nil, nil, wdir, 128*pageSize, compress) @@ -277,14 +268,10 @@ func TestReadToEndWithCheckpoint(t *testing.T) { for _, compress := range []bool{false, true} { 
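The change running through all of these WAL tests is mechanical: the manual create/defer-remove temp-directory dance is replaced by t.TempDir(), which has the testing package create the directory and register its removal automatically (available since Go 1.15). A condensed before/after sketch, assuming only the standard library:

package waltest

import (
	"io/ioutil"
	"os"
	"testing"
)

// Before: every test created its own directory and had to remember the
// cleanup, which is exactly the boilerplate being deleted above.
func TestManualTempDir(t *testing.T) {
	dir, err := ioutil.TempDir("", "wal_test")
	if err != nil {
		t.Fatal(err)
	}
	defer func() {
		if err := os.RemoveAll(dir); err != nil {
			t.Fatal(err)
		}
	}()
	_ = dir // ... exercise the WAL in dir ...
}

// After: the testing package creates a unique directory and removes it
// automatically once the test and its subtests complete.
func TestAutomaticTempDir(t *testing.T) {
	dir := t.TempDir()
	_ = dir // ... exercise the WAL in dir ...
}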
t.Run(fmt.Sprintf("compress=%t", compress), func(t *testing.T) { - dir, err := ioutil.TempDir("", "readToEnd_withCheckpoint") - require.NoError(t, err) - defer func() { - require.NoError(t, os.RemoveAll(dir)) - }() + dir := t.TempDir() wdir := path.Join(dir, "wal") - err = os.Mkdir(wdir, 0777) + err := os.Mkdir(wdir, 0777) require.NoError(t, err) enc := record.Encoder{} @@ -368,14 +355,10 @@ func TestReadCheckpoint(t *testing.T) { for _, compress := range []bool{false, true} { t.Run(fmt.Sprintf("compress=%t", compress), func(t *testing.T) { - dir, err := ioutil.TempDir("", "readCheckpoint") - require.NoError(t, err) - defer func() { - require.NoError(t, os.RemoveAll(dir)) - }() + dir := t.TempDir() wdir := path.Join(dir, "wal") - err = os.Mkdir(wdir, 0777) + err := os.Mkdir(wdir, 0777) require.NoError(t, err) os.Create(SegmentName(wdir, 30)) @@ -440,14 +423,10 @@ func TestReadCheckpointMultipleSegments(t *testing.T) { for _, compress := range []bool{false, true} { t.Run(fmt.Sprintf("compress=%t", compress), func(t *testing.T) { - dir, err := ioutil.TempDir("", "readCheckpoint") - require.NoError(t, err) - defer func() { - require.NoError(t, os.RemoveAll(dir)) - }() + dir := t.TempDir() wdir := path.Join(dir, "wal") - err = os.Mkdir(wdir, 0777) + err := os.Mkdir(wdir, 0777) require.NoError(t, err) enc := record.Encoder{} @@ -522,14 +501,10 @@ func TestCheckpointSeriesReset(t *testing.T) { for _, tc := range testCases { t.Run(fmt.Sprintf("compress=%t", tc.compress), func(t *testing.T) { - dir, err := ioutil.TempDir("", "seriesReset") - require.NoError(t, err) - defer func() { - require.NoError(t, os.RemoveAll(dir)) - }() + dir := t.TempDir() wdir := path.Join(dir, "wal") - err = os.Mkdir(wdir, 0777) + err := os.Mkdir(wdir, 0777) require.NoError(t, err) enc := record.Encoder{} From 660329d5b311387ff857576c6f4690893f91e714 Mon Sep 17 00:00:00 2001 From: chenlujjj <953546398@qq.com> Date: Mon, 1 Nov 2021 18:35:19 +0800 Subject: [PATCH 044/161] add tombstoneFormatVersionSize & tombstonesCRCSize constants (#9625) Signed-off-by: chenlujjj <953546398@qq.com> --- tsdb/tombstones/tombstones.go | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/tsdb/tombstones/tombstones.go b/tsdb/tombstones/tombstones.go index 8b95481a4f..5f5ca7d09c 100644 --- a/tsdb/tombstones/tombstones.go +++ b/tsdb/tombstones/tombstones.go @@ -39,8 +39,10 @@ const ( // MagicTombstone is 4 bytes at the head of a tombstone file. MagicTombstone = 0x0130BA30 - tombstoneFormatV1 = 1 - tombstonesHeaderSize = 5 + tombstoneFormatV1 = 1 + tombstoneFormatVersionSize = 1 + tombstonesHeaderSize = 5 + tombstonesCRCSize = 4 ) // The table gets initialized with sync.Once but may still cause a race @@ -110,7 +112,7 @@ func WriteFile(logger log.Logger, dir string, tr Reader) (int64, error) { } // Ignore first byte which is the format type. We do this for compatibility. - if _, err := hash.Write(bytes[1:]); err != nil { + if _, err := hash.Write(bytes[tombstoneFormatVersionSize:]); err != nil { return 0, errors.Wrap(err, "calculating hash for tombstones") } @@ -198,7 +200,7 @@ func ReadTombstones(dir string) (Reader, int64, error) { return nil, 0, errors.Wrap(encoding.ErrInvalidSize, "tombstones header") } - d := &encoding.Decbuf{B: b[:len(b)-4]} // 4 for the checksum. 
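+	// The tombstone file layout, per the constants introduced above, is
+	// [4-byte magic][1-byte format version][tombstone data][4-byte CRC32],
+	// so the decode buffer below stops tombstonesCRCSize bytes before the end.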
+	d := &encoding.Decbuf{B: b[:len(b)-tombstonesCRCSize]}
 	if mg := d.Be32(); mg != MagicTombstone {
 		return nil, 0, fmt.Errorf("invalid magic number %x", mg)
 	}
@@ -206,10 +208,10 @@ func ReadTombstones(dir string) (Reader, int64, error) {
 	// Verify checksum.
 	hash := newCRC32()
 	// Ignore first byte which is the format type.
-	if _, err := hash.Write(d.Get()[1:]); err != nil {
+	if _, err := hash.Write(d.Get()[tombstoneFormatVersionSize:]); err != nil {
 		return nil, 0, errors.Wrap(err, "write to hash")
 	}
-	if binary.BigEndian.Uint32(b[len(b)-4:]) != hash.Sum32() {
+	if binary.BigEndian.Uint32(b[len(b)-tombstonesCRCSize:]) != hash.Sum32() {
 		return nil, 0, errors.New("checksum did not match")
 	}

From 4b799c361a6d768314af8b3ea82eccb25298e214 Mon Sep 17 00:00:00 2001
From: Hu Shuai
Date: Mon, 1 Nov 2021 18:38:23 +0800
Subject: [PATCH 045/161] Fix a typo in cmd/prometheus/main.go (#9632)

Signed-off-by: Hu Shuai

---
 cmd/prometheus/main.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/cmd/prometheus/main.go b/cmd/prometheus/main.go
index d78e1420a8..e0b8f9e5f4 100644
--- a/cmd/prometheus/main.go
+++ b/cmd/prometheus/main.go
@@ -1344,7 +1344,7 @@ func (s *readyStorage) StartTime() (int64, error) {
 	case *agent.DB:
 		return db.StartTime()
 	default:
-		panic(fmt.Sprintf("unkown storage type %T", db))
+		panic(fmt.Sprintf("unknown storage type %T", db))
 	}
 }

From a7e554b158fa8d32ad195a36d81fc45751754899 Mon Sep 17 00:00:00 2001
From: Darshan Chaudhary
Date: Mon, 1 Nov 2021 19:12:12 +0530
Subject: [PATCH 046/161] add check service-discovery command (#8970)

Signed-off-by: darshanime

---
 cmd/promtool/main.go    |   8 +++
 cmd/promtool/sd.go      | 148 ++++++++++++++++++++++++++++++++++++++++
 cmd/promtool/sd_test.go |  66 ++++++++++++++++++
 scrape/manager_test.go  |   2 +-
 scrape/target.go        |   6 +-
 5 files changed, 226 insertions(+), 4 deletions(-)
 create mode 100644 cmd/promtool/sd.go
 create mode 100644 cmd/promtool/sd_test.go

diff --git a/cmd/promtool/main.go b/cmd/promtool/main.go
index 8763e78b61..d49892d2d3 100644
--- a/cmd/promtool/main.go
+++ b/cmd/promtool/main.go
@@ -63,6 +63,11 @@ func main() {

 	checkCmd := app.Command("check", "Check the resources for validity.")

+	sdCheckCmd := checkCmd.Command("service-discovery", "Perform service discovery for the given job name and report the results, including relabeling.")
+	sdConfigFile := sdCheckCmd.Arg("config-file", "The prometheus config file.").Required().ExistingFile()
+	sdJobName := sdCheckCmd.Arg("job", "The job to run service discovery for.").Required().String()
+	sdTimeout := sdCheckCmd.Flag("timeout", "The time to wait for discovery results.").Default("30s").Duration()
+
 	checkConfigCmd := checkCmd.Command("config", "Check if the config files are valid or not.")
 	configFiles := checkConfigCmd.Arg(
 		"config-files",
@@ -202,6 +207,9 @@ func main() {
 	}

 	switch parsedCmd {
+	case sdCheckCmd.FullCommand():
+		os.Exit(CheckSD(*sdConfigFile, *sdJobName, *sdTimeout))
+
 	case checkConfigCmd.FullCommand():
 		os.Exit(CheckConfig(*agentMode, *configFiles...))

diff --git a/cmd/promtool/sd.go b/cmd/promtool/sd.go
new file mode 100644
index 0000000000..c1cd8e28ba
--- /dev/null
+++ b/cmd/promtool/sd.go
@@ -0,0 +1,148 @@
+// Copyright 2021 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package main + +import ( + "context" + "encoding/json" + "fmt" + "os" + "reflect" + "time" + + "github.com/go-kit/log" + + "github.com/prometheus/prometheus/config" + "github.com/prometheus/prometheus/discovery" + "github.com/prometheus/prometheus/discovery/targetgroup" + "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/scrape" +) + +type sdCheckResult struct { + DiscoveredLabels labels.Labels `json:"discoveredLabels"` + Labels labels.Labels `json:"labels"` + Error error `json:"error,omitempty"` +} + +// CheckSD performs service discovery for the given job name and reports the results. +func CheckSD(sdConfigFiles, sdJobName string, sdTimeout time.Duration) int { + logger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr)) + + cfg, err := config.LoadFile(sdConfigFiles, false, false, logger) + if err != nil { + fmt.Fprintln(os.Stderr, "Cannot load config", err) + return 2 + } + + var scrapeConfig *config.ScrapeConfig + jobs := []string{} + jobMatched := false + for _, v := range cfg.ScrapeConfigs { + jobs = append(jobs, v.JobName) + if v.JobName == sdJobName { + jobMatched = true + scrapeConfig = v + break + } + } + + if !jobMatched { + fmt.Fprintf(os.Stderr, "Job %s not found. Select one of:\n", sdJobName) + for _, job := range jobs { + fmt.Fprintf(os.Stderr, "\t%s\n", job) + } + return 1 + } + + targetGroupChan := make(chan []*targetgroup.Group) + ctx, cancel := context.WithTimeout(context.Background(), sdTimeout) + defer cancel() + + for _, cfg := range scrapeConfig.ServiceDiscoveryConfigs { + d, err := cfg.NewDiscoverer(discovery.DiscovererOptions{Logger: logger}) + if err != nil { + fmt.Fprintln(os.Stderr, "Could not create new discoverer", err) + return 2 + } + go d.Run(ctx, targetGroupChan) + } + + var targetGroups []*targetgroup.Group + sdCheckResults := make(map[string][]*targetgroup.Group) +outerLoop: + for { + select { + case targetGroups = <-targetGroupChan: + for _, tg := range targetGroups { + sdCheckResults[tg.Source] = append(sdCheckResults[tg.Source], tg) + } + case <-ctx.Done(): + break outerLoop + } + } + results := []sdCheckResult{} + for _, tgs := range sdCheckResults { + results = append(results, getSDCheckResult(tgs, scrapeConfig)...) + } + + res, err := json.MarshalIndent(results, "", " ") + if err != nil { + fmt.Fprintf(os.Stderr, "Could not marshal result json: %s", err) + return 2 + } + + fmt.Printf("%s", res) + return 0 +} + +func getSDCheckResult(targetGroups []*targetgroup.Group, scrapeConfig *config.ScrapeConfig) []sdCheckResult { + sdCheckResults := []sdCheckResult{} + for _, targetGroup := range targetGroups { + for _, target := range targetGroup.Targets { + labelSlice := make([]labels.Label, 0, len(target)+len(targetGroup.Labels)) + + for name, value := range target { + labelSlice = append(labelSlice, labels.Label{Name: string(name), Value: string(value)}) + } + + for name, value := range targetGroup.Labels { + if _, ok := target[name]; !ok { + labelSlice = append(labelSlice, labels.Label{Name: string(name), Value: string(value)}) + } + } + + targetLabels := labels.New(labelSlice...) 
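+			// PopulateLabels applies the scrape config's relabel rules to the
+			// discovered labels, mirroring what Prometheus itself does at
+			// scrape time, so the `promtool check service-discovery` command
+			// reports exactly the label sets a real scrape would produce.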
+ res, orig, err := scrape.PopulateLabels(targetLabels, scrapeConfig) + result := sdCheckResult{ + DiscoveredLabels: orig, + Labels: res, + Error: err, + } + + duplicateRes := false + for _, sdCheckRes := range sdCheckResults { + if reflect.DeepEqual(sdCheckRes, result) { + duplicateRes = true + break + } + } + + if !duplicateRes { + sdCheckResults = append(sdCheckResults, result) + } + } + } + return sdCheckResults +} diff --git a/cmd/promtool/sd_test.go b/cmd/promtool/sd_test.go new file mode 100644 index 0000000000..e985197b6f --- /dev/null +++ b/cmd/promtool/sd_test.go @@ -0,0 +1,66 @@ +// Copyright 2021 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package main + +import ( + "testing" + + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/config" + "github.com/prometheus/prometheus/discovery/targetgroup" + "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/pkg/relabel" + + "github.com/stretchr/testify/require" +) + +func TestSDCheckResult(t *testing.T) { + targetGroups := []*targetgroup.Group{{ + Targets: []model.LabelSet{ + map[model.LabelName]model.LabelValue{"__address__": "localhost:8080", "foo": "bar"}, + }, + }} + + reg, err := relabel.NewRegexp("(.*)") + require.Nil(t, err) + + scrapeConfig := &config.ScrapeConfig{ + RelabelConfigs: []*relabel.Config{{ + SourceLabels: model.LabelNames{"foo"}, + Action: relabel.Replace, + TargetLabel: "newfoo", + Regex: reg, + Replacement: "$1", + }}, + } + + expectedSDCheckResult := []sdCheckResult{ + sdCheckResult{ + DiscoveredLabels: labels.Labels{ + labels.Label{Name: "__address__", Value: "localhost:8080"}, + labels.Label{Name: "__scrape_interval__", Value: "0s"}, + labels.Label{Name: "__scrape_timeout__", Value: "0s"}, + labels.Label{Name: "foo", Value: "bar"}}, + Labels: labels.Labels{ + labels.Label{Name: "__address__", Value: "localhost:8080"}, + labels.Label{Name: "__scrape_interval__", Value: "0s"}, + labels.Label{Name: "__scrape_timeout__", Value: "0s"}, + labels.Label{Name: "foo", Value: "bar"}, + labels.Label{Name: "instance", Value: "localhost:8080"}, + labels.Label{Name: "newfoo", Value: "bar"}}, + Error: nil, + }} + + require.Equal(t, expectedSDCheckResult, getSDCheckResult(targetGroups, scrapeConfig)) +} diff --git a/scrape/manager_test.go b/scrape/manager_test.go index 00228932ad..302113875b 100644 --- a/scrape/manager_test.go +++ b/scrape/manager_test.go @@ -335,7 +335,7 @@ func TestPopulateLabels(t *testing.T) { for _, c := range cases { in := c.in.Copy() - res, orig, err := populateLabels(c.in, c.cfg) + res, orig, err := PopulateLabels(c.in, c.cfg) if c.err != "" { require.EqualError(t, err, c.err) } else { diff --git a/scrape/target.go b/scrape/target.go index c791a8a22d..742e306631 100644 --- a/scrape/target.go +++ b/scrape/target.go @@ -348,10 +348,10 @@ func (app *timeLimitAppender) Append(ref uint64, lset labels.Labels, t int64, v return ref, nil } -// populateLabels builds a label set from the given label set and scrape configuration. 
+// PopulateLabels builds a label set from the given label set and scrape configuration. // It returns a label set before relabeling was applied as the second return value. // Returns the original discovered label set found before relabelling was applied if the target is dropped during relabeling. -func populateLabels(lset labels.Labels, cfg *config.ScrapeConfig) (res, orig labels.Labels, err error) { +func PopulateLabels(lset labels.Labels, cfg *config.ScrapeConfig) (res, orig labels.Labels, err error) { // Copy labels into the labelset for the target if they are not set already. scrapeLabels := []labels.Label{ {Name: model.JobLabel, Value: cfg.JobName}, @@ -488,7 +488,7 @@ func TargetsFromGroup(tg *targetgroup.Group, cfg *config.ScrapeConfig) ([]*Targe lset := labels.New(lbls...) - lbls, origLabels, err := populateLabels(lset, cfg) + lbls, origLabels, err := PopulateLabels(lset, cfg) if err != nil { failures = append(failures, errors.Wrapf(err, "instance %d in group %s", i, tg)) } From d4c83da6d252d4edfdbd639fb817ebdb8e9ab2e4 Mon Sep 17 00:00:00 2001 From: Julien Pivotto Date: Tue, 2 Nov 2021 11:07:15 +0100 Subject: [PATCH 047/161] Release 2.31 (#9639) Signed-off-by: Julien Pivotto --- CHANGELOG.md | 7 ++----- VERSION | 2 +- 2 files changed, 3 insertions(+), 6 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 38b03ceb10..80dd066119 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,9 +1,6 @@ -## 2.31.0-rc.1 / 2021-10-25 - -* [BUGFIX] UI: Fix assets loading when a prefix is used in `--web.route-prefix` or `--web.external-url`. #9586 - -## 2.31.0-rc.0 / 2021-10-21 +## 2.31.0 / 2021-11-02 +* [CHANGE] UI: Remove standard PromQL editor in favour of the codemirror-based editor. #9452 * [FEATURE] PromQL: Add trigonometric functions and `atan2` binary operator. #9239 #9248 #9515 * [FEATURE] Remote: Add support for exemplar in the remote write receiver endpoint. #9319 #9414 * [FEATURE] SD: Add PuppetDB service discovery. #8883 diff --git a/VERSION b/VERSION index 4e9a8e7368..bafceb320e 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.31.0-rc.1 +2.31.0 From 6e1d6edb33f3bbb10b375baa36b937150e1255a7 Mon Sep 17 00:00:00 2001 From: Julien Pivotto Date: Tue, 2 Nov 2021 13:58:51 +0100 Subject: [PATCH 048/161] Exclude agent from windows tests (#9645) We are aware of the issue, but while we are working on it, having main tests broken is an annoyance. Signed-off-by: Julien Pivotto --- tsdb/agent/db_test.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tsdb/agent/db_test.go b/tsdb/agent/db_test.go index e1a6106a36..8f6cdc68ab 100644 --- a/tsdb/agent/db_test.go +++ b/tsdb/agent/db_test.go @@ -11,6 +11,9 @@ // See the License for the specific language governing permissions and // limitations under the License. 
+//go:build !windows
+// +build !windows
+
 package agent

 import (

From 807f46a1ed2b7e04418f12ed7ec460329e3a1950 Mon Sep 17 00:00:00 2001
From: Julien Pivotto
Date: Tue, 2 Nov 2021 14:03:35 +0100
Subject: [PATCH 049/161] Gate agent behind a feature flag, validate mode flags (#9620)

Signed-off-by: Julien Pivotto

---
 cmd/prometheus/main.go      | 25 ++++++++++----
 cmd/prometheus/main_test.go | 67 +++++++++++++++++++++++++++++++++++--
 docs/feature_flags.md       | 10 ++++++
 3 files changed, 93 insertions(+), 9 deletions(-)

diff --git a/cmd/prometheus/main.go b/cmd/prometheus/main.go
index e0b8f9e5f4..c18f6aa554 100644
--- a/cmd/prometheus/main.go
+++ b/cmd/prometheus/main.go
@@ -132,7 +132,7 @@ func extractFlagName(pc *kingpin.ParseContext) string {
 		if !ok {
 			continue
 		}
-		return fc.Model().Name
+		return "--" + fc.Model().Name
 	}
 	panic("extractFlagName not called from a kingpin PreAction. This is a bug, please report to Prometheus.")
 }
@@ -200,6 +200,9 @@ func (c *flagConfig) setFeatureListOptions(logger log.Logger) error {
 		case "new-service-discovery-manager":
 			c.enableNewSDManager = true
 			level.Info(logger).Log("msg", "Experimental service discovery manager")
+		case "agent":
+			agentMode = true
+			level.Info(logger).Log("msg", "Experimental agent mode enabled.")
 		case "":
 			continue
 		default:
@@ -238,8 +241,6 @@ func main() {

 	a.HelpFlag.Short('h')

-	a.Flag("agent", "Agent mode.").BoolVar(&agentMode)
-
 	a.Flag("config.file", "Prometheus configuration file path.").
 		Default("prometheus.yml").StringVar(&cfg.configFile)

@@ -335,16 +336,16 @@ func main() {
 		PreAction(agentOnlySetting()).
 		Default("data-agent/").StringVar(&cfg.localStoragePath)

-	a.Flag("storage.agent.segment-size",
+	a.Flag("storage.agent.wal-segment-size",
 		"Size at which to split WAL segment files. Example: 100MB").
 		PreAction(agentOnlySetting()).
 		Hidden().PlaceHolder("").BytesVar(&cfg.agent.WALSegmentSize)

-	a.Flag("storage.agent.compression", "Compress the agent WAL.").
+	a.Flag("storage.agent.wal-compression", "Compress the agent WAL.").
 		PreAction(agentOnlySetting()).
 		Default("true").BoolVar(&cfg.agent.WALCompression)

-	a.Flag("storage.agent.truncate-frequency",
+	a.Flag("storage.agent.wal-truncate-frequency",
 		"The frequency at which to truncate the WAL and remove old data.").
 		PreAction(agentOnlySetting()).
Hidden().PlaceHolder("").SetValue(&cfg.agent.TruncateFrequency) @@ -434,6 +435,16 @@ func main() { os.Exit(1) } + if agentMode && len(serverOnlyFlags) > 0 { + fmt.Fprintf(os.Stderr, "The following flag(s) can not be used in agent mode: %q", serverOnlyFlags) + os.Exit(3) + } + + if !agentMode && len(agentOnlyFlags) > 0 { + fmt.Fprintf(os.Stderr, "The following flag(s) can only be used in agent mode: %q", agentOnlyFlags) + os.Exit(3) + } + cfg.web.ExternalURL, err = computeExternalURL(cfg.prometheusURL, cfg.web.ListenAddress) if err != nil { fmt.Fprintln(os.Stderr, errors.Wrapf(err, "parse external URL %q", cfg.prometheusURL)) @@ -1006,7 +1017,7 @@ func main() { level.Info(logger).Log("msg", "Starting WAL storage ...") if cfg.agent.WALSegmentSize != 0 { if cfg.agent.WALSegmentSize < 10*1024*1024 || cfg.agent.WALSegmentSize > 256*1024*1024 { - return errors.New("flag 'storage.agent.segment-size' must be set between 10MB and 256MB") + return errors.New("flag 'storage.agent.wal-segment-size' must be set between 10MB and 256MB") } } db, err := agent.Open( diff --git a/cmd/prometheus/main_test.go b/cmd/prometheus/main_test.go index 6f4d58b94f..86ecce1ac5 100644 --- a/cmd/prometheus/main_test.go +++ b/cmd/prometheus/main_test.go @@ -350,7 +350,7 @@ func getCurrentGaugeValuesFor(t *testing.T, reg prometheus.Gatherer, metricNames } func TestAgentSuccessfulStartup(t *testing.T) { - prom := exec.Command(promPath, "-test.main", "--agent", "--config.file="+agentConfig) + prom := exec.Command(promPath, "-test.main", "--enable-feature=agent", "--config.file="+agentConfig) err := prom.Start() require.NoError(t, err) @@ -370,7 +370,7 @@ func TestAgentSuccessfulStartup(t *testing.T) { } func TestAgentStartupWithInvalidConfig(t *testing.T) { - prom := exec.Command(promPath, "-test.main", "--agent", "--config.file="+promConfig) + prom := exec.Command(promPath, "-test.main", "--enable-feature=agent", "--config.file="+promConfig) err := prom.Start() require.NoError(t, err) @@ -388,3 +388,66 @@ func TestAgentStartupWithInvalidConfig(t *testing.T) { } require.Equal(t, expectedExitStatus, actualExitStatus) } + +func TestModeSpecificFlags(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode.") + } + + testcases := []struct { + mode string + arg string + exitStatus int + }{ + {"agent", "--storage.agent.path", 0}, + {"server", "--storage.tsdb.path", 0}, + {"server", "--storage.agent.path", 3}, + {"agent", "--storage.tsdb.path", 3}, + } + + for _, tc := range testcases { + t.Run(fmt.Sprintf("%s mode with option %s", tc.mode, tc.arg), func(t *testing.T) { + args := []string{"-test.main", tc.arg, t.TempDir()} + + if tc.mode == "agent" { + args = append(args, "--enable-feature=agent", "--config.file="+agentConfig) + } else { + args = append(args, "--config.file="+promConfig) + } + + prom := exec.Command(promPath, args...) + + // Log stderr in case of failure. 
+ stderr, err := prom.StderrPipe() + require.NoError(t, err) + go func() { + slurp, _ := ioutil.ReadAll(stderr) + t.Log(string(slurp)) + }() + + err = prom.Start() + require.NoError(t, err) + + if tc.exitStatus == 0 { + done := make(chan error, 1) + go func() { done <- prom.Wait() }() + select { + case err := <-done: + t.Errorf("prometheus should be still running: %v", err) + case <-time.After(5 * time.Second): + prom.Process.Kill() + } + return + } + + err = prom.Wait() + require.Error(t, err) + if exitError, ok := err.(*exec.ExitError); ok { + status := exitError.Sys().(syscall.WaitStatus) + require.Equal(t, tc.exitStatus, status.ExitStatus()) + } else { + t.Errorf("unable to retrieve the exit status for prometheus: %v", err) + } + }) + } +} diff --git a/docs/feature_flags.md b/docs/feature_flags.md index adef7fc931..e5683c8ab4 100644 --- a/docs/feature_flags.md +++ b/docs/feature_flags.md @@ -86,3 +86,13 @@ issues upstream. In future releases, this new service discovery manager will become the default and this feature flag will be ignored. + +## Prometheus agent + +`--enable-feature=agent` + +When enabled, Prometheus runs in agent mode. The agent mode is limited to +discovery, scrape and remote write. + +This is useful when you do not need to query the Prometheus data locally, but +only from a central [remote endpoint](https://prometheus.io/docs/operating/integrations/#remote-endpoints-and-storage). From 1a6c2283a3e37f5ef6e0df78b72e67d3b2995a56 Mon Sep 17 00:00:00 2001 From: Mateusz Gozdek Date: Fri, 22 Oct 2021 10:06:44 +0200 Subject: [PATCH 050/161] Format Go source files using 'gofumpt -w -s -extra' Part of #9557 Signed-off-by: Mateusz Gozdek --- cmd/prometheus/main.go | 6 +- cmd/prometheus/main_test.go | 10 +- cmd/promtool/archive.go | 2 +- cmd/promtool/backfill.go | 3 - cmd/promtool/debug.go | 1 - cmd/promtool/main.go | 11 +- cmd/promtool/rules_test.go | 8 +- cmd/promtool/sd_test.go | 11 +- cmd/promtool/tsdb.go | 11 +- cmd/promtool/unittest.go | 1 - config/config.go | 2 +- config/config_test.go | 213 +++-- discovery/aws/ec2.go | 12 +- discovery/aws/lightsail.go | 12 +- discovery/azure/azure.go | 4 +- discovery/consul/consul.go | 4 +- discovery/consul/consul_test.go | 7 +- discovery/digitalocean/mock_test.go | 3 +- discovery/file/file_test.go | 7 +- discovery/hetzner/hcloud.go | 1 + discovery/hetzner/mock_test.go | 6 +- discovery/hetzner/robot.go | 1 + discovery/http/http_test.go | 2 - discovery/kubernetes/client_metrics.go | 10 +- discovery/kubernetes/endpoints_test.go | 2 +- discovery/kubernetes/kubernetes_test.go | 16 +- discovery/kubernetes/node_test.go | 2 +- discovery/legacymanager/manager_test.go | 7 +- discovery/manager_test.go | 11 +- discovery/marathon/marathon.go | 2 - discovery/marathon/marathon_test.go | 76 +- discovery/openstack/hypervisor.go | 6 +- discovery/openstack/hypervisor_test.go | 1 - discovery/openstack/instance.go | 6 +- discovery/openstack/instance_test.go | 1 - discovery/openstack/mock_test.go | 2 +- discovery/openstack/openstack.go | 1 - discovery/scaleway/scaleway.go | 3 +- discovery/targetgroup/targetgroup_test.go | 38 +- discovery/triton/triton.go | 2 +- discovery/triton/triton_test.go | 20 +- discovery/uyuni/uyuni.go | 7 +- discovery/xds/client_test.go | 24 +- discovery/xds/kuma_test.go | 1 - .../examples/custom-sd/adapter-usage/main.go | 5 +- .../examples/custom-sd/adapter/adapter.go | 2 +- .../remote_storage_adapter/graphite/client.go | 2 +- .../graphite/client_test.go | 12 +- .../remote_storage_adapter/influxdb/client.go | 2 +- 
.../opentsdb/client_test.go | 12 +- notifier/notifier.go | 1 - notifier/notifier_test.go | 7 +- pkg/labels/labels_test.go | 2 +- pkg/logging/file.go | 10 +- pkg/runtime/statfs_default.go | 1 - pkg/runtime/statfs_linux_386.go | 1 - pkg/runtime/statfs_uint32.go | 1 - pkg/textparse/promparse_test.go | 5 +- promql/engine.go | 12 +- promql/engine_test.go | 787 ++++++++++-------- promql/functions.go | 2 +- promql/parser/ast.go | 13 +- promql/parser/generated_parser.y.go | 162 ++-- promql/parser/lex_test.go | 34 +- promql/parser/parse.go | 10 +- promql/parser/parse_test.go | 771 +++++++++++------ promql/query_logger.go | 6 +- rules/alerting.go | 4 +- rules/manager.go | 2 - rules/manager_test.go | 11 +- scrape/scrape_test.go | 17 +- storage/buffer.go | 2 +- storage/merge.go | 8 +- storage/merge_test.go | 3 +- storage/remote/queue_manager.go | 8 +- storage/remote/queue_manager_test.go | 4 +- storage/remote/read_test.go | 1 - storage/remote/write_handler.go | 4 +- template/template.go | 4 +- template/template_test.go | 21 +- tsdb/agent/db.go | 4 +- tsdb/agent/series.go | 2 +- tsdb/block.go | 12 +- tsdb/block_test.go | 4 +- tsdb/chunks/chunks.go | 12 +- tsdb/chunks/head_chunks.go | 11 +- tsdb/chunks/head_chunks_other.go | 8 +- tsdb/chunks/head_chunks_test.go | 3 +- tsdb/chunks/head_chunks_windows.go | 8 +- tsdb/compact.go | 2 +- tsdb/db.go | 10 +- tsdb/db_test.go | 15 +- tsdb/encoding/encoding.go | 1 - tsdb/exemplar.go | 2 +- tsdb/exemplar_test.go | 4 +- tsdb/fileutil/fileutil.go | 6 +- tsdb/fileutil/flock.go | 2 +- tsdb/fileutil/flock_plan9.go | 2 +- tsdb/fileutil/flock_solaris.go | 2 +- tsdb/fileutil/flock_unix.go | 2 +- tsdb/head.go | 3 +- tsdb/head_test.go | 11 +- tsdb/head_wal.go | 13 +- tsdb/index/index.go | 7 +- tsdb/index/index_test.go | 5 +- tsdb/index/postings_test.go | 17 +- tsdb/index/postingsstats.go | 1 - tsdb/index/postingsstats_test.go | 3 +- tsdb/querier.go | 4 +- tsdb/querier_test.go | 10 +- tsdb/record/record.go | 12 +- tsdb/repair_test.go | 2 +- tsdb/test/hash_test.go | 1 - tsdb/tombstones/tombstones_test.go | 2 +- tsdb/tsdbutil/buffer.go | 2 +- tsdb/tsdbutil/chunks.go | 2 +- tsdb/wal.go | 4 +- tsdb/wal/checkpoint.go | 2 +- tsdb/wal/checkpoint_test.go | 31 +- tsdb/wal/reader_test.go | 177 ++-- tsdb/wal/wal.go | 6 +- tsdb/wal/wal_test.go | 8 +- tsdb/wal/watcher_test.go | 22 +- tsdb/wal_test.go | 8 +- util/osutil/hostname.go | 2 - util/strutil/strconv.go | 4 +- util/testutil/roundtrip.go | 4 +- web/api/v1/api.go | 9 +- web/api/v1/api_test.go | 37 +- web/web.go | 1 - 130 files changed, 1697 insertions(+), 1350 deletions(-) diff --git a/cmd/prometheus/main.go b/cmd/prometheus/main.go index c18f6aa554..62796f9b6f 100644 --- a/cmd/prometheus/main.go +++ b/cmd/prometheus/main.go @@ -664,7 +664,7 @@ func main() { ) // This is passed to ruleManager.Update(). 
- var externalURL = cfg.web.ExternalURL.String() + externalURL := cfg.web.ExternalURL.String() reloaders := []reloader{ { @@ -896,7 +896,6 @@ func main() { return nil } } - }, func(err error) { // Wait for any in-progress reloads to complete to avoid @@ -1146,6 +1145,7 @@ type safePromQLNoStepSubqueryInterval struct { func durationToInt64Millis(d time.Duration) int64 { return int64(d / time.Millisecond) } + func (i *safePromQLNoStepSubqueryInterval) Set(ev model.Duration) { i.value.Store(durationToInt64Millis(time.Duration(ev))) } @@ -1159,7 +1159,7 @@ type reloader struct { reloader func(*config.Config) error } -func reloadConfig(filename string, expandExternalLabels bool, enableExemplarStorage bool, logger log.Logger, noStepSuqueryInterval *safePromQLNoStepSubqueryInterval, rls ...reloader) (err error) { +func reloadConfig(filename string, expandExternalLabels, enableExemplarStorage bool, logger log.Logger, noStepSuqueryInterval *safePromQLNoStepSubqueryInterval, rls ...reloader) (err error) { start := time.Now() timings := []interface{}{} level.Info(logger).Log("msg", "Loading configuration file", "filename", filename) diff --git a/cmd/prometheus/main_test.go b/cmd/prometheus/main_test.go index 86ecce1ac5..e6c8fcc07c 100644 --- a/cmd/prometheus/main_test.go +++ b/cmd/prometheus/main_test.go @@ -35,10 +35,12 @@ import ( "github.com/prometheus/prometheus/rules" ) -var promPath = os.Args[0] -var promConfig = filepath.Join("..", "..", "documentation", "examples", "prometheus.yml") -var agentConfig = filepath.Join("..", "..", "documentation", "examples", "prometheus-agent.yml") -var promData = filepath.Join(os.TempDir(), "data") +var ( + promPath = os.Args[0] + promConfig = filepath.Join("..", "..", "documentation", "examples", "prometheus.yml") + agentConfig = filepath.Join("..", "..", "documentation", "examples", "prometheus-agent.yml") + promData = filepath.Join(os.TempDir(), "data") +) func TestMain(m *testing.M) { for i, arg := range os.Args { diff --git a/cmd/promtool/archive.go b/cmd/promtool/archive.go index 520c26b63f..cca148cb4d 100644 --- a/cmd/promtool/archive.go +++ b/cmd/promtool/archive.go @@ -21,7 +21,7 @@ import ( "github.com/pkg/errors" ) -const filePerm = 0666 +const filePerm = 0o666 type tarGzFileWriter struct { tarWriter *tar.Writer diff --git a/cmd/promtool/backfill.go b/cmd/promtool/backfill.go index db6eddcd64..b626743dea 100644 --- a/cmd/promtool/backfill.go +++ b/cmd/promtool/backfill.go @@ -105,7 +105,6 @@ func createBlocks(input []byte, mint, maxt, maxBlockDuration int64, maxSamplesIn // The next sample is not in this timerange, we can avoid parsing // the file for this timerange. 
continue - } nextSampleTs = math.MaxInt64 @@ -207,13 +206,11 @@ func createBlocks(input []byte, mint, maxt, maxBlockDuration int64, maxSamplesIn return nil }() - if err != nil { return errors.Wrap(err, "process blocks") } } return nil - } func backfill(maxSamplesInAppender int, input []byte, outputDir string, humanReadable, quiet bool, maxBlockDuration time.Duration) (err error) { diff --git a/cmd/promtool/debug.go b/cmd/promtool/debug.go index 280a0d44b2..23d613bb06 100644 --- a/cmd/promtool/debug.go +++ b/cmd/promtool/debug.go @@ -57,7 +57,6 @@ func debugWrite(cfg debugWriterConfig) error { return errors.Wrap(err, "error writing into the archive") } } - } if err := archiver.close(); err != nil { diff --git a/cmd/promtool/main.go b/cmd/promtool/main.go index d49892d2d3..5ba483566a 100644 --- a/cmd/promtool/main.go +++ b/cmd/promtool/main.go @@ -257,7 +257,7 @@ func main() { case tsdbDumpCmd.FullCommand(): os.Exit(checkErr(dumpSamples(*dumpPath, *dumpMinTime, *dumpMaxTime))) - //TODO(aSquare14): Work on adding support for custom block size. + // TODO(aSquare14): Work on adding support for custom block size. case openMetricsImportCmd.FullCommand(): os.Exit(backfillOpenMetrics(*importFilePath, *importDBPath, *importHumanReadable, *importQuiet, *maxBlockDuration)) @@ -560,7 +560,6 @@ func checkDuplicates(groups []rulefmt.RuleGroup) []compareRuleType { var rules compareRuleTypes for _, group := range groups { - for _, rule := range group.Rules { rules = append(rules, compareRuleType{ metric: ruleMetric(rule), @@ -774,7 +773,7 @@ func QuerySeries(url *url.URL, matchers []string, start, end string, p printer) } // QueryLabels queries for label values against a Prometheus server. -func QueryLabels(url *url.URL, name string, start, end string, p printer) int { +func QueryLabels(url *url.URL, name, start, end string, p printer) int { if url.Scheme == "" { url.Scheme = "http" } @@ -952,11 +951,13 @@ type promqlPrinter struct{} func (p *promqlPrinter) printValue(v model.Value) { fmt.Println(v) } + func (p *promqlPrinter) printSeries(val []model.LabelSet) { for _, v := range val { fmt.Println(v) } } + func (p *promqlPrinter) printLabelValues(val model.LabelValues) { for _, v := range val { fmt.Println(v) @@ -969,10 +970,12 @@ func (j *jsonPrinter) printValue(v model.Value) { //nolint:errcheck json.NewEncoder(os.Stdout).Encode(v) } + func (j *jsonPrinter) printSeries(v []model.LabelSet) { //nolint:errcheck json.NewEncoder(os.Stdout).Encode(v) } + func (j *jsonPrinter) printLabelValues(v model.LabelValues) { //nolint:errcheck json.NewEncoder(os.Stdout).Encode(v) @@ -980,7 +983,7 @@ func (j *jsonPrinter) printLabelValues(v model.LabelValues) { // importRules backfills recording rules from the files provided. The output are blocks of data // at the outputDir location. 
-func importRules(url *url.URL, start, end, outputDir string, evalInterval time.Duration, maxBlockDuration time.Duration, files ...string) error { +func importRules(url *url.URL, start, end, outputDir string, evalInterval, maxBlockDuration time.Duration, files ...string) error { ctx := context.Background() var stime, etime time.Time var err error diff --git a/cmd/promtool/rules_test.go b/cmd/promtool/rules_test.go index d1f8938b98..868dff984f 100644 --- a/cmd/promtool/rules_test.go +++ b/cmd/promtool/rules_test.go @@ -54,7 +54,7 @@ func TestBackfillRuleIntegration(t *testing.T) { twentyFourHourDuration, _ = time.ParseDuration("24h") ) - var testCases = []struct { + testCases := []struct { name string runcount int maxBlockDuration time.Duration @@ -192,7 +192,7 @@ func createSingleRuleTestFiles(path string) error { labels: testlabel11: testlabelvalue11 ` - return ioutil.WriteFile(path, []byte(recordingRules), 0777) + return ioutil.WriteFile(path, []byte(recordingRules), 0o777) } func createMultiRuleTestFiles(path string) error { @@ -212,7 +212,7 @@ func createMultiRuleTestFiles(path string) error { labels: testlabel11: testlabelvalue13 ` - return ioutil.WriteFile(path, []byte(recordingRules), 0777) + return ioutil.WriteFile(path, []byte(recordingRules), 0o777) } // TestBackfillLabels confirms that the labels in the rule file override the labels from the metrics @@ -244,7 +244,7 @@ func TestBackfillLabels(t *testing.T) { labels: name1: value-from-rule ` - require.NoError(t, ioutil.WriteFile(path, []byte(recordingRules), 0777)) + require.NoError(t, ioutil.WriteFile(path, []byte(recordingRules), 0o777)) errs := ruleImporter.loadGroups(ctx, []string{path}) for _, err := range errs { require.NoError(t, err) diff --git a/cmd/promtool/sd_test.go b/cmd/promtool/sd_test.go index e985197b6f..d9d9744812 100644 --- a/cmd/promtool/sd_test.go +++ b/cmd/promtool/sd_test.go @@ -46,21 +46,24 @@ func TestSDCheckResult(t *testing.T) { } expectedSDCheckResult := []sdCheckResult{ - sdCheckResult{ + { DiscoveredLabels: labels.Labels{ labels.Label{Name: "__address__", Value: "localhost:8080"}, labels.Label{Name: "__scrape_interval__", Value: "0s"}, labels.Label{Name: "__scrape_timeout__", Value: "0s"}, - labels.Label{Name: "foo", Value: "bar"}}, + labels.Label{Name: "foo", Value: "bar"}, + }, Labels: labels.Labels{ labels.Label{Name: "__address__", Value: "localhost:8080"}, labels.Label{Name: "__scrape_interval__", Value: "0s"}, labels.Label{Name: "__scrape_timeout__", Value: "0s"}, labels.Label{Name: "foo", Value: "bar"}, labels.Label{Name: "instance", Value: "localhost:8080"}, - labels.Label{Name: "newfoo", Value: "bar"}}, + labels.Label{Name: "newfoo", Value: "bar"}, + }, Error: nil, - }} + }, + } require.Equal(t, expectedSDCheckResult, getSDCheckResult(targetGroups, scrapeConfig)) } diff --git a/cmd/promtool/tsdb.go b/cmd/promtool/tsdb.go index 45cde87cf8..5e77802a98 100644 --- a/cmd/promtool/tsdb.go +++ b/cmd/promtool/tsdb.go @@ -17,7 +17,6 @@ import ( "bufio" "context" "fmt" - "github.com/prometheus/prometheus/tsdb/index" "io" "io/ioutil" "math" @@ -32,6 +31,8 @@ import ( "text/tabwriter" "time" + "github.com/prometheus/prometheus/tsdb/index" + "github.com/alecthomas/units" "github.com/go-kit/log" "github.com/pkg/errors" @@ -78,7 +79,7 @@ func benchmarkWrite(outPath, samplesFile string, numMetrics, numScrapes int) err if err := os.RemoveAll(b.outPath); err != nil { return err } - if err := os.MkdirAll(b.outPath, 0777); err != nil { + if err := os.MkdirAll(b.outPath, 0o777); err != nil { return err } @@ 
-589,7 +590,7 @@ func analyzeCompaction(block tsdb.BlockReader, indexr tsdb.IndexReader) (err err histogram := make([]int, nBuckets) totalChunks := 0 for postingsr.Next() { - var lbsl = labels.Labels{} + lbsl := labels.Labels{} var chks []chunks.Meta if err := indexr.Series(postingsr.At(), &lbsl, &chks); err != nil { return err @@ -671,14 +672,14 @@ func checkErr(err error) int { return 0 } -func backfillOpenMetrics(path string, outputDir string, humanReadable, quiet bool, maxBlockDuration time.Duration) int { +func backfillOpenMetrics(path, outputDir string, humanReadable, quiet bool, maxBlockDuration time.Duration) int { inputFile, err := fileutil.OpenMmapFile(path) if err != nil { return checkErr(err) } defer inputFile.Close() - if err := os.MkdirAll(outputDir, 0777); err != nil { + if err := os.MkdirAll(outputDir, 0o777); err != nil { return checkErr(errors.Wrap(err, "create output dir")) } diff --git a/cmd/promtool/unittest.go b/cmd/promtool/unittest.go index 5dcc5bb56c..6378463030 100644 --- a/cmd/promtool/unittest.go +++ b/cmd/promtool/unittest.go @@ -387,7 +387,6 @@ Outer: // seriesLoadingString returns the input series in PromQL notation. func (tg *testGroup) seriesLoadingString() string { - result := fmt.Sprintf("load %v\n", shortDuration(tg.Interval)) for _, is := range tg.InputSeries { result += fmt.Sprintf(" %v %v\n", is.Series, is.Values) diff --git a/config/config.go b/config/config.go index 9729642ae0..a247898a9d 100644 --- a/config/config.go +++ b/config/config.go @@ -99,7 +99,7 @@ func Load(s string, expandExternalLabels bool, logger log.Logger) (*Config, erro } // LoadFile parses the given YAML file into a Config. -func LoadFile(filename string, agentMode bool, expandExternalLabels bool, logger log.Logger) (*Config, error) { +func LoadFile(filename string, agentMode, expandExternalLabels bool, logger log.Logger) (*Config, error) { content, err := ioutil.ReadFile(filename) if err != nil { return nil, err diff --git a/config/config_test.go b/config/config_test.go index f26e4f3e66..4fe5731a2b 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -784,17 +784,19 @@ var expectedConf = &Config{ Scheme: DefaultScrapeConfig.Scheme, HTTPClientConfig: config.DefaultHTTPClientConfig, - ServiceDiscoveryConfigs: discovery.Configs{&openstack.SDConfig{ - Role: "instance", - Region: "RegionOne", - Port: 80, - Availability: "public", - RefreshInterval: model.Duration(60 * time.Second), - TLSConfig: config.TLSConfig{ - CAFile: "testdata/valid_ca_file", - CertFile: "testdata/valid_cert_file", - KeyFile: "testdata/valid_key_file", - }}, + ServiceDiscoveryConfigs: discovery.Configs{ + &openstack.SDConfig{ + Role: "instance", + Region: "RegionOne", + Port: 80, + Availability: "public", + RefreshInterval: model.Duration(60 * time.Second), + TLSConfig: config.TLSConfig{ + CAFile: "testdata/valid_ca_file", + CertFile: "testdata/valid_cert_file", + KeyFile: "testdata/valid_key_file", + }, + }, }, }, { @@ -808,22 +810,23 @@ var expectedConf = &Config{ Scheme: DefaultScrapeConfig.Scheme, HTTPClientConfig: config.DefaultHTTPClientConfig, - ServiceDiscoveryConfigs: discovery.Configs{&puppetdb.SDConfig{ - URL: "https://puppetserver/", - Query: "resources { type = \"Package\" and title = \"httpd\" }", - IncludeParameters: true, - Port: 80, - RefreshInterval: model.Duration(60 * time.Second), - HTTPClientConfig: config.HTTPClientConfig{ - FollowRedirects: true, - TLSConfig: config.TLSConfig{ - CAFile: "testdata/valid_ca_file", - CertFile: "testdata/valid_cert_file", - KeyFile: 
"testdata/valid_key_file", + ServiceDiscoveryConfigs: discovery.Configs{ + &puppetdb.SDConfig{ + URL: "https://puppetserver/", + Query: "resources { type = \"Package\" and title = \"httpd\" }", + IncludeParameters: true, + Port: 80, + RefreshInterval: model.Duration(60 * time.Second), + HTTPClientConfig: config.HTTPClientConfig{ + FollowRedirects: true, + TLSConfig: config.TLSConfig{ + CAFile: "testdata/valid_ca_file", + CertFile: "testdata/valid_cert_file", + KeyFile: "testdata/valid_key_file", + }, }, }, }, - }, }, { JobName: "hetzner", @@ -1086,170 +1089,224 @@ var expectedErrors = []struct { { filename: "jobname.bad.yml", errMsg: `job_name is empty`, - }, { + }, + { filename: "jobname_dup.bad.yml", errMsg: `found multiple scrape configs with job name "prometheus"`, - }, { + }, + { filename: "scrape_interval.bad.yml", errMsg: `scrape timeout greater than scrape interval`, - }, { + }, + { filename: "labelname.bad.yml", errMsg: `"not$allowed" is not a valid label name`, - }, { + }, + { filename: "labelname2.bad.yml", errMsg: `"not:allowed" is not a valid label name`, - }, { + }, + { filename: "labelvalue.bad.yml", errMsg: `"\xff" is not a valid label value`, - }, { + }, + { filename: "regex.bad.yml", errMsg: "error parsing regexp", - }, { + }, + { filename: "modulus_missing.bad.yml", errMsg: "relabel configuration for hashmod requires non-zero modulus", - }, { + }, + { filename: "labelkeep.bad.yml", errMsg: "labelkeep action requires only 'regex', and no other fields", - }, { + }, + { filename: "labelkeep2.bad.yml", errMsg: "labelkeep action requires only 'regex', and no other fields", - }, { + }, + { filename: "labelkeep3.bad.yml", errMsg: "labelkeep action requires only 'regex', and no other fields", - }, { + }, + { filename: "labelkeep4.bad.yml", errMsg: "labelkeep action requires only 'regex', and no other fields", - }, { + }, + { filename: "labelkeep5.bad.yml", errMsg: "labelkeep action requires only 'regex', and no other fields", - }, { + }, + { filename: "labeldrop.bad.yml", errMsg: "labeldrop action requires only 'regex', and no other fields", - }, { + }, + { filename: "labeldrop2.bad.yml", errMsg: "labeldrop action requires only 'regex', and no other fields", - }, { + }, + { filename: "labeldrop3.bad.yml", errMsg: "labeldrop action requires only 'regex', and no other fields", - }, { + }, + { filename: "labeldrop4.bad.yml", errMsg: "labeldrop action requires only 'regex', and no other fields", - }, { + }, + { filename: "labeldrop5.bad.yml", errMsg: "labeldrop action requires only 'regex', and no other fields", - }, { + }, + { filename: "labelmap.bad.yml", errMsg: "\"l-$1\" is invalid 'replacement' for labelmap action", - }, { + }, + { filename: "rules.bad.yml", errMsg: "invalid rule file path", - }, { + }, + { filename: "unknown_attr.bad.yml", errMsg: "field consult_sd_configs not found in type", - }, { + }, + { filename: "bearertoken.bad.yml", errMsg: "at most one of bearer_token & bearer_token_file must be configured", - }, { + }, + { filename: "bearertoken_basicauth.bad.yml", errMsg: "at most one of basic_auth, oauth2, bearer_token & bearer_token_file must be configured", - }, { + }, + { filename: "kubernetes_http_config_without_api_server.bad.yml", errMsg: "to use custom HTTP client configuration please provide the 'api_server' URL explicitly", - }, { + }, + { filename: "kubernetes_kubeconfig_with_apiserver.bad.yml", errMsg: "cannot use 'kubeconfig_file' and 'api_server' simultaneously", - }, { + }, + { filename: "kubernetes_kubeconfig_with_http_config.bad.yml", errMsg: 
"cannot use a custom HTTP client configuration together with 'kubeconfig_file'", }, { filename: "kubernetes_bearertoken.bad.yml", errMsg: "at most one of bearer_token & bearer_token_file must be configured", - }, { + }, + { filename: "kubernetes_role.bad.yml", errMsg: "role", - }, { + }, + { filename: "kubernetes_selectors_endpoints.bad.yml", errMsg: "endpoints role supports only pod, service, endpoints selectors", - }, { + }, + { filename: "kubernetes_selectors_ingress.bad.yml", errMsg: "ingress role supports only ingress selectors", - }, { + }, + { filename: "kubernetes_selectors_node.bad.yml", errMsg: "node role supports only node selectors", - }, { + }, + { filename: "kubernetes_selectors_pod.bad.yml", errMsg: "pod role supports only pod selectors", - }, { + }, + { filename: "kubernetes_selectors_service.bad.yml", errMsg: "service role supports only service selectors", - }, { + }, + { filename: "kubernetes_namespace_discovery.bad.yml", errMsg: "field foo not found in type kubernetes.plain", - }, { + }, + { filename: "kubernetes_selectors_duplicated_role.bad.yml", errMsg: "duplicated selector role: pod", - }, { + }, + { filename: "kubernetes_selectors_incorrect_selector.bad.yml", errMsg: "invalid selector: 'metadata.status-Running'; can't understand 'metadata.status-Running'", - }, { + }, + { filename: "kubernetes_bearertoken_basicauth.bad.yml", errMsg: "at most one of basic_auth, oauth2, bearer_token & bearer_token_file must be configured", - }, { + }, + { filename: "kubernetes_authorization_basicauth.bad.yml", errMsg: "at most one of basic_auth, oauth2 & authorization must be configured", - }, { + }, + { filename: "marathon_no_servers.bad.yml", errMsg: "marathon_sd: must contain at least one Marathon server", - }, { + }, + { filename: "marathon_authtoken_authtokenfile.bad.yml", errMsg: "marathon_sd: at most one of auth_token & auth_token_file must be configured", - }, { + }, + { filename: "marathon_authtoken_basicauth.bad.yml", errMsg: "marathon_sd: at most one of basic_auth, auth_token & auth_token_file must be configured", - }, { + }, + { filename: "marathon_authtoken_bearertoken.bad.yml", errMsg: "marathon_sd: at most one of bearer_token, bearer_token_file, auth_token & auth_token_file must be configured", - }, { + }, + { filename: "marathon_authtoken_authorization.bad.yml", errMsg: "marathon_sd: at most one of auth_token, auth_token_file & authorization must be configured", - }, { + }, + { filename: "openstack_role.bad.yml", errMsg: "unknown OpenStack SD role", - }, { + }, + { filename: "openstack_availability.bad.yml", errMsg: "unknown availability invalid, must be one of admin, internal or public", - }, { + }, + { filename: "url_in_targetgroup.bad.yml", errMsg: "\"http://bad\" is not a valid hostname", - }, { + }, + { filename: "target_label_missing.bad.yml", errMsg: "relabel configuration for replace action requires 'target_label' value", - }, { + }, + { filename: "target_label_hashmod_missing.bad.yml", errMsg: "relabel configuration for hashmod action requires 'target_label' value", - }, { + }, + { filename: "unknown_global_attr.bad.yml", errMsg: "field nonexistent_field not found in type config.plain", - }, { + }, + { filename: "remote_read_url_missing.bad.yml", errMsg: `url for remote_read is empty`, - }, { + }, + { filename: "remote_write_header.bad.yml", errMsg: `x-prometheus-remote-write-version is a reserved header. It must not be changed`, - }, { + }, + { filename: "remote_read_header.bad.yml", errMsg: `x-prometheus-remote-write-version is a reserved header. 
It must not be changed`, - }, { + }, + { filename: "remote_write_authorization_header.bad.yml", errMsg: `authorization header must be changed via the basic_auth, authorization, oauth2, or sigv4 parameter`, - }, { + }, + { filename: "remote_write_url_missing.bad.yml", errMsg: `url for remote_write is empty`, - }, { + }, + { filename: "remote_write_dup.bad.yml", errMsg: `found multiple remote write configs with job name "queue1"`, - }, { + }, + { filename: "remote_read_dup.bad.yml", errMsg: `found multiple remote read configs with job name "queue1"`, }, diff --git a/discovery/aws/ec2.go b/discovery/aws/ec2.go index 0bcfd05476..8984035e24 100644 --- a/discovery/aws/ec2.go +++ b/discovery/aws/ec2.go @@ -63,13 +63,11 @@ const ( ec2LabelSeparator = "," ) -var ( - // DefaultEC2SDConfig is the default EC2 SD configuration. - DefaultEC2SDConfig = EC2SDConfig{ - Port: 80, - RefreshInterval: model.Duration(60 * time.Second), - } -) +// DefaultEC2SDConfig is the default EC2 SD configuration. +var DefaultEC2SDConfig = EC2SDConfig{ + Port: 80, + RefreshInterval: model.Duration(60 * time.Second), +} func init() { discovery.RegisterConfig(&EC2SDConfig{}) diff --git a/discovery/aws/lightsail.go b/discovery/aws/lightsail.go index e3dc65b5d6..e5165776f6 100644 --- a/discovery/aws/lightsail.go +++ b/discovery/aws/lightsail.go @@ -53,13 +53,11 @@ const ( lightsailLabelSeparator = "," ) -var ( - // DefaultLightsailSDConfig is the default Lightsail SD configuration. - DefaultLightsailSDConfig = LightsailSDConfig{ - Port: 80, - RefreshInterval: model.Duration(60 * time.Second), - } -) +// DefaultLightsailSDConfig is the default Lightsail SD configuration. +var DefaultLightsailSDConfig = LightsailSDConfig{ + Port: 80, + RefreshInterval: model.Duration(60 * time.Second), +} func init() { discovery.RegisterConfig(&LightsailSDConfig{}) diff --git a/discovery/azure/azure.go b/discovery/azure/azure.go index c81eb22053..3e7cd4e122 100644 --- a/discovery/azure/azure.go +++ b/discovery/azure/azure.go @@ -339,7 +339,6 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) { // Get the IP address information via separate call to the network provider. for _, nicID := range vm.NetworkInterfaces { networkInterface, err := client.getNetworkInterfaceByID(ctx, nicID) - if err != nil { level.Error(d.logger).Log("msg", "Unable to get network interface", "name", nicID, "err", err) ch <- target{labelSet: nil, err: err} @@ -437,9 +436,8 @@ func (client *azureClient) getScaleSets(ctx context.Context) ([]compute.VirtualM func (client *azureClient) getScaleSetVMs(ctx context.Context, scaleSet compute.VirtualMachineScaleSet) ([]virtualMachine, error) { var vms []virtualMachine - //TODO do we really need to fetch the resourcegroup this way? + // TODO do we really need to fetch the resourcegroup this way? r, err := newAzureResourceFromID(*scaleSet.ID, nil) - if err != nil { return nil, errors.Wrap(err, "could not parse scale set ID") } diff --git a/discovery/consul/consul.go b/discovery/consul/consul.go index 16d0d26282..0aef808d5d 100644 --- a/discovery/consul/consul.go +++ b/discovery/consul/consul.go @@ -54,7 +54,7 @@ const ( healthLabel = model.MetaLabelPrefix + "consul_health" // serviceAddressLabel is the name of the label containing the (optional) service address. serviceAddressLabel = model.MetaLabelPrefix + "consul_service_address" - //servicePortLabel is the name of the label containing the service port. + // servicePortLabel is the name of the label containing the service port. 
servicePortLabel = model.MetaLabelPrefix + "consul_service_port" // datacenterLabel is the name of the label containing the datacenter ID. datacenterLabel = model.MetaLabelPrefix + "consul_dc" @@ -530,7 +530,7 @@ func (srv *consulService) watch(ctx context.Context, ch chan<- []*targetgroup.Gr for _, serviceNode := range serviceNodes { // We surround the separated list with the separator as well. This way regular expressions // in relabeling rules don't have to consider tag positions. - var tags = srv.tagSeparator + strings.Join(serviceNode.Service.Tags, srv.tagSeparator) + srv.tagSeparator + tags := srv.tagSeparator + strings.Join(serviceNode.Service.Tags, srv.tagSeparator) + srv.tagSeparator // If the service address is not empty it should be used instead of the node address // since the service may be registered remotely through a different node. diff --git a/discovery/consul/consul_test.go b/discovery/consul/consul_test.go index 15092c3041..986c6a4d22 100644 --- a/discovery/consul/consul_test.go +++ b/discovery/consul/consul_test.go @@ -37,9 +37,9 @@ func TestMain(m *testing.M) { func TestConfiguredService(t *testing.T) { conf := &SDConfig{ - Services: []string{"configuredServiceName"}} + Services: []string{"configuredServiceName"}, + } consulDiscovery, err := NewDiscovery(conf, nil) - if err != nil { t.Errorf("Unexpected error when initializing discovery %v", err) } @@ -57,7 +57,6 @@ func TestConfiguredServiceWithTag(t *testing.T) { ServiceTags: []string{"http"}, } consulDiscovery, err := NewDiscovery(conf, nil) - if err != nil { t.Errorf("Unexpected error when initializing discovery %v", err) } @@ -153,7 +152,6 @@ func TestConfiguredServiceWithTags(t *testing.T) { for _, tc := range cases { consulDiscovery, err := NewDiscovery(tc.conf, nil) - if err != nil { t.Errorf("Unexpected error when initializing discovery %v", err) } @@ -168,7 +166,6 @@ func TestConfiguredServiceWithTags(t *testing.T) { func TestNonConfiguredService(t *testing.T) { conf := &SDConfig{} consulDiscovery, err := NewDiscovery(conf, nil) - if err != nil { t.Errorf("Unexpected error when initializing discovery %v", err) } diff --git a/discovery/digitalocean/mock_test.go b/discovery/digitalocean/mock_test.go index edbdf92ba0..2f19b5e1a1 100644 --- a/discovery/digitalocean/mock_test.go +++ b/discovery/digitalocean/mock_test.go @@ -75,7 +75,8 @@ func (m *SDMock) HandleDropletsList() { panic(err) } } - fmt.Fprint(w, []string{` + fmt.Fprint(w, []string{ + ` { "droplets": [ { diff --git a/discovery/file/file_test.go b/discovery/file/file_test.go index bf50fc257f..4a9f8c26f0 100644 --- a/discovery/file/file_test.go +++ b/discovery/file/file_test.go @@ -73,7 +73,7 @@ func (t *testRunner) copyFile(src string) string { } // copyFileTo atomically copies a file with a different name to the runner's directory. -func (t *testRunner) copyFileTo(src string, name string) string { +func (t *testRunner) copyFileTo(src, name string) string { t.Helper() newf, err := ioutil.TempFile(t.dir, "") @@ -95,7 +95,7 @@ func (t *testRunner) copyFileTo(src string, name string) string { } // writeString writes atomically a string to a file. 
-func (t *testRunner) writeString(file string, data string) { +func (t *testRunner) writeString(file, data string) { t.Helper() newf, err := ioutil.TempFile(t.dir, "") @@ -477,6 +477,7 @@ func TestRemoveFile(t *testing.T) { }, { Source: fileSource(sdFile, 1), - }}, + }, + }, ) } diff --git a/discovery/hetzner/hcloud.go b/discovery/hetzner/hcloud.go index 35492e6a0a..aa406a1a7a 100644 --- a/discovery/hetzner/hcloud.go +++ b/discovery/hetzner/hcloud.go @@ -78,6 +78,7 @@ func newHcloudDiscovery(conf *SDConfig, logger log.Logger) (*hcloudDiscovery, er ) return d, nil } + func (d *hcloudDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) { servers, err := d.client.Server.All(ctx) if err != nil { diff --git a/discovery/hetzner/mock_test.go b/discovery/hetzner/mock_test.go index 5936d52574..ecf3132742 100644 --- a/discovery/hetzner/mock_test.go +++ b/discovery/hetzner/mock_test.go @@ -489,8 +489,10 @@ func (m *SDMock) HandleHcloudNetworks() { }) } -const robotTestUsername = "my-hetzner" -const robotTestPassword = "my-password" +const ( + robotTestUsername = "my-hetzner" + robotTestPassword = "my-password" +) // HandleRobotServers mocks the robot servers list endpoint. func (m *SDMock) HandleRobotServers() { diff --git a/discovery/hetzner/robot.go b/discovery/hetzner/robot.go index 25f04502f1..d0f3e4d948 100644 --- a/discovery/hetzner/robot.go +++ b/discovery/hetzner/robot.go @@ -70,6 +70,7 @@ func newRobotDiscovery(conf *SDConfig, logger log.Logger) (*robotDiscovery, erro return d, nil } + func (d *robotDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) { req, err := http.NewRequest("GET", d.endpoint+"/server", nil) if err != nil { diff --git a/discovery/http/http_test.go b/discovery/http/http_test.go index e9dc98b8f2..b7a0369e70 100644 --- a/discovery/http/http_test.go +++ b/discovery/http/http_test.go @@ -60,7 +60,6 @@ func TestHTTPValidRefresh(t *testing.T) { }, } require.Equal(t, tgs, expectedTargets) - } func TestHTTPInvalidCode(t *testing.T) { @@ -398,5 +397,4 @@ func TestSourceDisappeared(t *testing.T) { require.Equal(t, test.expectedTargets[i], tgs) } } - } diff --git a/discovery/kubernetes/client_metrics.go b/discovery/kubernetes/client_metrics.go index 45e249be21..3a33e3e8d5 100644 --- a/discovery/kubernetes/client_metrics.go +++ b/discovery/kubernetes/client_metrics.go @@ -121,9 +121,11 @@ func (f *clientGoRequestMetricAdapter) Register(registerer prometheus.Registerer clientGoRequestLatencyMetricVec, ) } -func (clientGoRequestMetricAdapter) Increment(ctx context.Context, code string, method string, host string) { + +func (clientGoRequestMetricAdapter) Increment(ctx context.Context, code, method, host string) { clientGoRequestResultMetricVec.WithLabelValues(code).Inc() } + func (clientGoRequestMetricAdapter) Observe(ctx context.Context, verb string, u url.URL, latency time.Duration) { clientGoRequestLatencyMetricVec.WithLabelValues(u.EscapedPath()).Observe(latency.Seconds()) } @@ -146,21 +148,27 @@ func (f *clientGoWorkqueueMetricsProvider) Register(registerer prometheus.Regist func (f *clientGoWorkqueueMetricsProvider) NewDepthMetric(name string) workqueue.GaugeMetric { return clientGoWorkqueueDepthMetricVec.WithLabelValues(name) } + func (f *clientGoWorkqueueMetricsProvider) NewAddsMetric(name string) workqueue.CounterMetric { return clientGoWorkqueueAddsMetricVec.WithLabelValues(name) } + func (f *clientGoWorkqueueMetricsProvider) NewLatencyMetric(name string) workqueue.HistogramMetric { return 
clientGoWorkqueueLatencyMetricVec.WithLabelValues(name) } + func (f *clientGoWorkqueueMetricsProvider) NewWorkDurationMetric(name string) workqueue.HistogramMetric { return clientGoWorkqueueWorkDurationMetricVec.WithLabelValues(name) } + func (f *clientGoWorkqueueMetricsProvider) NewUnfinishedWorkSecondsMetric(name string) workqueue.SettableGaugeMetric { return clientGoWorkqueueUnfinishedWorkSecondsMetricVec.WithLabelValues(name) } + func (f *clientGoWorkqueueMetricsProvider) NewLongestRunningProcessorSecondsMetric(name string) workqueue.SettableGaugeMetric { return clientGoWorkqueueLongestRunningProcessorMetricVec.WithLabelValues(name) } + func (clientGoWorkqueueMetricsProvider) NewRetriesMetric(name string) workqueue.CounterMetric { // Retries are not used so the metric is omitted. return noopMetric{} diff --git a/discovery/kubernetes/endpoints_test.go b/discovery/kubernetes/endpoints_test.go index e73d8ba066..335242c3c0 100644 --- a/discovery/kubernetes/endpoints_test.go +++ b/discovery/kubernetes/endpoints_test.go @@ -27,7 +27,7 @@ import ( ) func makeEndpoints() *v1.Endpoints { - var nodeName = "foobar" + nodeName := "foobar" return &v1.Endpoints{ ObjectMeta: metav1.ObjectMeta{ Name: "testendpoints", diff --git a/discovery/kubernetes/kubernetes_test.go b/discovery/kubernetes/kubernetes_test.go index e1ca23402e..2da8ec546c 100644 --- a/discovery/kubernetes/kubernetes_test.go +++ b/discovery/kubernetes/kubernetes_test.go @@ -171,13 +171,15 @@ type hasSynced interface { hasSynced() bool } -var _ hasSynced = &Discovery{} -var _ hasSynced = &Node{} -var _ hasSynced = &Endpoints{} -var _ hasSynced = &EndpointSlice{} -var _ hasSynced = &Ingress{} -var _ hasSynced = &Pod{} -var _ hasSynced = &Service{} +var ( + _ hasSynced = &Discovery{} + _ hasSynced = &Node{} + _ hasSynced = &Endpoints{} + _ hasSynced = &EndpointSlice{} + _ hasSynced = &Ingress{} + _ hasSynced = &Pod{} + _ hasSynced = &Service{} +) func (d *Discovery) hasSynced() bool { d.RLock() diff --git a/discovery/kubernetes/node_test.go b/discovery/kubernetes/node_test.go index 4a08eefc85..afdaaf6b2b 100644 --- a/discovery/kubernetes/node_test.go +++ b/discovery/kubernetes/node_test.go @@ -25,7 +25,7 @@ import ( "github.com/prometheus/prometheus/discovery/targetgroup" ) -func makeNode(name, address string, labels map[string]string, annotations map[string]string) *v1.Node { +func makeNode(name, address string, labels, annotations map[string]string) *v1.Node { return &v1.Node{ ObjectMeta: metav1.ObjectMeta{ Name: name, diff --git a/discovery/legacymanager/manager_test.go b/discovery/legacymanager/manager_test.go index a977fcd292..ce2278d231 100644 --- a/discovery/legacymanager/manager_test.go +++ b/discovery/legacymanager/manager_test.go @@ -37,7 +37,6 @@ func TestMain(m *testing.M) { // TestTargetUpdatesOrder checks that the target updates are received in the expected order. func TestTargetUpdatesOrder(t *testing.T) { - // The order by which the updates are send is determined by the interval passed to the mock discovery adapter // Final targets array is ordered alphabetically by the name of the discoverer. // For example discoverer "A" with targets "t2,t3" and discoverer "B" with targets "t1,t2" will result in "t2,t3,t1,t2" after the merge. 
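[editor's aside] The merge order the comment above describes can be reproduced standalone: the manager flattens per-discoverer target groups alphabetically by discoverer name, which is what turns "A: t2,t3" plus "B: t1,t2" into "t2,t3,t1,t2". A minimal sketch of that ordering, using hypothetical discoverer names and targets rather than the actual manager code:

package main

import (
	"fmt"
	"sort"
)

func main() {
	// Hypothetical per-discoverer targets, keyed by discoverer name.
	byDiscoverer := map[string][]string{
		"B": {"t1", "t2"},
		"A": {"t2", "t3"},
	}
	names := make([]string, 0, len(byDiscoverer))
	for name := range byDiscoverer {
		names = append(names, name)
	}
	// The final targets array is ordered alphabetically by discoverer name.
	sort.Strings(names)
	var merged []string
	for _, name := range names {
		merged = append(merged, byDiscoverer[name]...)
	}
	fmt.Println(merged) // [t2 t3 t1 t2]
}
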
@@ -117,7 +116,8 @@ func TestTargetUpdatesOrder(t *testing.T) { { Source: "tp1_group2", Targets: []model.LabelSet{{"__instance__": "2"}}, - }}, + }, + }, }, }, }, @@ -729,14 +729,12 @@ func verifyPresence(t *testing.T, tSets map[poolKey]map[string]*targetgroup.Grou match := false var mergedTargets string for _, targetGroup := range tSets[poolKey] { - for _, l := range targetGroup.Targets { mergedTargets = mergedTargets + " " + l.String() if l.String() == label { match = true } } - } if match != present { msg := "" @@ -926,7 +924,6 @@ func TestGaugeFailedConfigs(t *testing.T) { if failedCount != 0 { t.Fatalf("Expected to get no failed config, got: %v", failedCount) } - } func TestCoordinationWithReceiver(t *testing.T) { diff --git a/discovery/manager_test.go b/discovery/manager_test.go index 657130e5e9..37d57f94ce 100644 --- a/discovery/manager_test.go +++ b/discovery/manager_test.go @@ -37,7 +37,6 @@ func TestMain(m *testing.M) { // TestTargetUpdatesOrder checks that the target updates are received in the expected order. func TestTargetUpdatesOrder(t *testing.T) { - // The order by which the updates are send is determined by the interval passed to the mock discovery adapter // Final targets array is ordered alphabetically by the name of the discoverer. // For example discoverer "A" with targets "t2,t3" and discoverer "B" with targets "t1,t2" will result in "t2,t3,t1,t2" after the merge. @@ -117,7 +116,8 @@ func TestTargetUpdatesOrder(t *testing.T) { { Source: "tp1_group2", Targets: []model.LabelSet{{"__instance__": "2"}}, - }}, + }, + }, }, }, }, @@ -719,7 +719,7 @@ func staticConfig(addrs ...string) StaticConfig { return cfg } -func verifySyncedPresence(t *testing.T, tGroups map[string][]*targetgroup.Group, key string, label string, present bool) { +func verifySyncedPresence(t *testing.T, tGroups map[string][]*targetgroup.Group, key, label string, present bool) { t.Helper() if _, ok := tGroups[key]; !ok { t.Fatalf("'%s' should be present in Group map keys: %v", key, tGroups) @@ -734,7 +734,6 @@ func verifySyncedPresence(t *testing.T, tGroups map[string][]*targetgroup.Group, match = true } } - } if match != present { msg := "" @@ -755,14 +754,12 @@ func verifyPresence(t *testing.T, tSets map[poolKey]map[string]*targetgroup.Grou match := false var mergedTargets string for _, targetGroup := range tSets[poolKey] { - for _, l := range targetGroup.Targets { mergedTargets = mergedTargets + " " + l.String() if l.String() == label { match = true } } - } if match != present { msg := "" @@ -1062,7 +1059,6 @@ func TestTargetSetRecreatesEmptyStaticConfigs(t *testing.T) { if lbls := syncedTargets["prometheus"][0].Labels; lbls != nil { t.Fatalf("Unexpected Group: expected nil Labels, got %v", lbls) } - } func TestIdenticalConfigurationsAreCoalesced(t *testing.T) { @@ -1179,7 +1175,6 @@ func TestGaugeFailedConfigs(t *testing.T) { if failedCount != 0 { t.Fatalf("Expected to get no failed config, got: %v", failedCount) } - } func TestCoordinationWithReceiver(t *testing.T) { diff --git a/discovery/marathon/marathon.go b/discovery/marathon/marathon.go index 30c32bb5d9..5688efedd5 100644 --- a/discovery/marathon/marathon.go +++ b/discovery/marathon/marathon.go @@ -478,7 +478,6 @@ func targetsForApp(app *app) []model.LabelSet { // Generate a target endpoint string in host:port format. 
func targetEndpoint(task *task, port uint32, containerNet bool) string { - var host string // Use the task's ipAddress field when it's in a container network @@ -493,7 +492,6 @@ func targetEndpoint(task *task, port uint32, containerNet bool) string { // Get a list of ports and a list of labels from a PortMapping. func extractPortMapping(portMappings []portMapping, containerNet bool) ([]uint32, []map[string]string) { - ports := make([]uint32, len(portMappings)) labels := make([]map[string]string, len(portMappings)) diff --git a/discovery/marathon/marathon_test.go b/discovery/marathon/marathon_test.go index 8ac9713a23..00d066e271 100644 --- a/discovery/marathon/marathon_test.go +++ b/discovery/marathon/marathon_test.go @@ -60,9 +60,7 @@ func TestMarathonSDHandleError(t *testing.T) { } func TestMarathonSDEmptyList(t *testing.T) { - var ( - client = func(_ context.Context, _ *http.Client, _ string) (*appList, error) { return &appList{}, nil } - ) + client := func(_ context.Context, _ *http.Client, _ string) (*appList, error) { return &appList{}, nil } tgs, err := testUpdateServices(client) if err != nil { t.Fatalf("Got error: %s", err) @@ -99,11 +97,9 @@ func marathonTestAppList(labels map[string]string, runningTasks int) *appList { } func TestMarathonSDSendGroup(t *testing.T) { - var ( - client = func(_ context.Context, _ *http.Client, _ string) (*appList, error) { - return marathonTestAppList(marathonValidLabel, 1), nil - } - ) + client := func(_ context.Context, _ *http.Client, _ string) (*appList, error) { + return marathonTestAppList(marathonValidLabel, 1), nil + } tgs, err := testUpdateServices(client) if err != nil { t.Fatalf("Got error: %s", err) @@ -195,11 +191,9 @@ func marathonTestAppListWithMultiplePorts(labels map[string]string, runningTasks } func TestMarathonSDSendGroupWithMultiplePort(t *testing.T) { - var ( - client = func(_ context.Context, _ *http.Client, _ string) (*appList, error) { - return marathonTestAppListWithMultiplePorts(marathonValidLabel, 1), nil - } - ) + client := func(_ context.Context, _ *http.Client, _ string) (*appList, error) { + return marathonTestAppListWithMultiplePorts(marathonValidLabel, 1), nil + } tgs, err := testUpdateServices(client) if err != nil { t.Fatalf("Got error: %s", err) @@ -254,11 +248,9 @@ func marathonTestZeroTaskPortAppList(labels map[string]string, runningTasks int) } func TestMarathonZeroTaskPorts(t *testing.T) { - var ( - client = func(_ context.Context, _ *http.Client, _ string) (*appList, error) { - return marathonTestZeroTaskPortAppList(marathonValidLabel, 1), nil - } - ) + client := func(_ context.Context, _ *http.Client, _ string) (*appList, error) { + return marathonTestZeroTaskPortAppList(marathonValidLabel, 1), nil + } tgs, err := testUpdateServices(client) if err != nil { t.Fatalf("Got error: %s", err) @@ -331,11 +323,9 @@ func marathonTestAppListWithPortDefinitions(labels map[string]string, runningTas } func TestMarathonSDSendGroupWithPortDefinitions(t *testing.T) { - var ( - client = func(_ context.Context, _ *http.Client, _ string) (*appList, error) { - return marathonTestAppListWithPortDefinitions(marathonValidLabel, 1), nil - } - ) + client := func(_ context.Context, _ *http.Client, _ string) (*appList, error) { + return marathonTestAppListWithPortDefinitions(marathonValidLabel, 1), nil + } tgs, err := testUpdateServices(client) if err != nil { t.Fatalf("Got error: %s", err) @@ -403,11 +393,9 @@ func marathonTestAppListWithPortDefinitionsRequirePorts(labels map[string]string } func 
TestMarathonSDSendGroupWithPortDefinitionsRequirePorts(t *testing.T) { - var ( - client = func(_ context.Context, _ *http.Client, _ string) (*appList, error) { - return marathonTestAppListWithPortDefinitionsRequirePorts(marathonValidLabel, 1), nil - } - ) + client := func(_ context.Context, _ *http.Client, _ string) (*appList, error) { + return marathonTestAppListWithPortDefinitionsRequirePorts(marathonValidLabel, 1), nil + } tgs, err := testUpdateServices(client) if err != nil { t.Fatalf("Got error: %s", err) @@ -470,11 +458,9 @@ func marathonTestAppListWithPorts(labels map[string]string, runningTasks int) *a } func TestMarathonSDSendGroupWithPorts(t *testing.T) { - var ( - client = func(_ context.Context, _ *http.Client, _ string) (*appList, error) { - return marathonTestAppListWithPorts(marathonValidLabel, 1), nil - } - ) + client := func(_ context.Context, _ *http.Client, _ string) (*appList, error) { + return marathonTestAppListWithPorts(marathonValidLabel, 1), nil + } tgs, err := testUpdateServices(client) if err != nil { t.Fatalf("Got error: %s", err) @@ -546,11 +532,9 @@ func marathonTestAppListWithContainerPortMappings(labels map[string]string, runn } func TestMarathonSDSendGroupWithContainerPortMappings(t *testing.T) { - var ( - client = func(_ context.Context, _ *http.Client, _ string) (*appList, error) { - return marathonTestAppListWithContainerPortMappings(marathonValidLabel, 1), nil - } - ) + client := func(_ context.Context, _ *http.Client, _ string) (*appList, error) { + return marathonTestAppListWithContainerPortMappings(marathonValidLabel, 1), nil + } tgs, err := testUpdateServices(client) if err != nil { t.Fatalf("Got error: %s", err) @@ -622,11 +606,9 @@ func marathonTestAppListWithDockerContainerPortMappings(labels map[string]string } func TestMarathonSDSendGroupWithDockerContainerPortMappings(t *testing.T) { - var ( - client = func(_ context.Context, _ *http.Client, _ string) (*appList, error) { - return marathonTestAppListWithDockerContainerPortMappings(marathonValidLabel, 1), nil - } - ) + client := func(_ context.Context, _ *http.Client, _ string) (*appList, error) { + return marathonTestAppListWithDockerContainerPortMappings(marathonValidLabel, 1), nil + } tgs, err := testUpdateServices(client) if err != nil { t.Fatalf("Got error: %s", err) @@ -702,11 +684,9 @@ func marathonTestAppListWithContainerNetworkAndPortMappings(labels map[string]st } func TestMarathonSDSendGroupWithContainerNetworkAndPortMapping(t *testing.T) { - var ( - client = func(_ context.Context, _ *http.Client, _ string) (*appList, error) { - return marathonTestAppListWithContainerNetworkAndPortMappings(marathonValidLabel, 1), nil - } - ) + client := func(_ context.Context, _ *http.Client, _ string) (*appList, error) { + return marathonTestAppListWithContainerNetworkAndPortMappings(marathonValidLabel, 1), nil + } tgs, err := testUpdateServices(client) if err != nil { t.Fatalf("Got error: %s", err) diff --git a/discovery/openstack/hypervisor.go b/discovery/openstack/hypervisor.go index 594f4e433e..877b3eb9b0 100644 --- a/discovery/openstack/hypervisor.go +++ b/discovery/openstack/hypervisor.go @@ -51,8 +51,10 @@ type HypervisorDiscovery struct { // newHypervisorDiscovery returns a new hypervisor discovery. 
func newHypervisorDiscovery(provider *gophercloud.ProviderClient, opts *gophercloud.AuthOptions, port int, region string, availability gophercloud.Availability, l log.Logger) *HypervisorDiscovery { - return &HypervisorDiscovery{provider: provider, authOpts: opts, - region: region, port: port, availability: availability, logger: l} + return &HypervisorDiscovery{ + provider: provider, authOpts: opts, + region: region, port: port, availability: availability, logger: l, + } } func (h *HypervisorDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) { diff --git a/discovery/openstack/hypervisor_test.go b/discovery/openstack/hypervisor_test.go index 7556a88f33..396d5283dc 100644 --- a/discovery/openstack/hypervisor_test.go +++ b/discovery/openstack/hypervisor_test.go @@ -47,7 +47,6 @@ func (s *OpenstackSDHypervisorTestSuite) openstackAuthSuccess() (refresher, erro } func TestOpenstackSDHypervisorRefresh(t *testing.T) { - mock := &OpenstackSDHypervisorTestSuite{} mock.SetupTest(t) diff --git a/discovery/openstack/instance.go b/discovery/openstack/instance.go index ab2221f4f9..fa4039beab 100644 --- a/discovery/openstack/instance.go +++ b/discovery/openstack/instance.go @@ -63,8 +63,10 @@ func newInstanceDiscovery(provider *gophercloud.ProviderClient, opts *gopherclou if l == nil { l = log.NewNopLogger() } - return &InstanceDiscovery{provider: provider, authOpts: opts, - region: region, port: port, allTenants: allTenants, availability: availability, logger: l} + return &InstanceDiscovery{ + provider: provider, authOpts: opts, + region: region, port: port, allTenants: allTenants, availability: availability, logger: l, + } } type floatingIPKey struct { diff --git a/discovery/openstack/instance_test.go b/discovery/openstack/instance_test.go index 8275727a64..d47cb0020e 100644 --- a/discovery/openstack/instance_test.go +++ b/discovery/openstack/instance_test.go @@ -51,7 +51,6 @@ func (s *OpenstackSDInstanceTestSuite) openstackAuthSuccess() (refresher, error) } func TestOpenstackSDInstanceRefresh(t *testing.T) { - mock := &OpenstackSDInstanceTestSuite{} mock.SetupTest(t) diff --git a/discovery/openstack/mock_test.go b/discovery/openstack/mock_test.go index f29d647d4d..e64c336482 100644 --- a/discovery/openstack/mock_test.go +++ b/discovery/openstack/mock_test.go @@ -54,7 +54,7 @@ func testMethod(t *testing.T, r *http.Request, expected string) { } } -func testHeader(t *testing.T, r *http.Request, header string, expected string) { +func testHeader(t *testing.T, r *http.Request, header, expected string) { if actual := r.Header.Get(header); expected != actual { t.Errorf("Header %s = %s, expected %s", header, actual, expected) } diff --git a/discovery/openstack/openstack.go b/discovery/openstack/openstack.go index 2a341976ed..932a4f5c2f 100644 --- a/discovery/openstack/openstack.go +++ b/discovery/openstack/openstack.go @@ -145,7 +145,6 @@ func NewDiscovery(conf *SDConfig, l log.Logger) (*refresh.Discovery, error) { time.Duration(conf.RefreshInterval), r.refresh, ), nil - } func newRefresher(conf *SDConfig, l log.Logger) (refresher, error) { diff --git a/discovery/scaleway/scaleway.go b/discovery/scaleway/scaleway.go index c8689cb947..920f087a1c 100644 --- a/discovery/scaleway/scaleway.go +++ b/discovery/scaleway/scaleway.go @@ -173,8 +173,7 @@ func init() { // Discovery periodically performs Scaleway requests. It implements // the Discoverer interface. 
-type Discovery struct { -} +type Discovery struct{} func NewDiscovery(conf *SDConfig, logger log.Logger) (*refresh.Discovery, error) { r, err := newRefresher(conf) diff --git a/discovery/targetgroup/targetgroup_test.go b/discovery/targetgroup/targetgroup_test.go index bf0d99553c..fe9587eb8a 100644 --- a/discovery/targetgroup/targetgroup_test.go +++ b/discovery/targetgroup/targetgroup_test.go @@ -38,7 +38,8 @@ func TestTargetGroupStrictJsonUnmarshal(t *testing.T) { expectedReply: nil, expectedGroup: Group{Targets: []model.LabelSet{ {"__address__": "localhost:9090"}, - {"__address__": "localhost:9091"}}, Labels: model.LabelSet{"my": "label"}}, + {"__address__": "localhost:9091"}, + }, Labels: model.LabelSet{"my": "label"}}, }, { json: ` {"label": {},"targets": []}`, @@ -56,7 +57,6 @@ func TestTargetGroupStrictJsonUnmarshal(t *testing.T) { require.Equal(t, test.expectedReply, actual) require.Equal(t, test.expectedGroup, tg) } - } func TestTargetGroupYamlMarshal(t *testing.T) { @@ -81,10 +81,13 @@ func TestTargetGroupYamlMarshal(t *testing.T) { }, { // targets only exposes addresses. - group: Group{Targets: []model.LabelSet{ - {"__address__": "localhost:9090"}, - {"__address__": "localhost:9091"}}, - Labels: model.LabelSet{"foo": "bar", "bar": "baz"}}, + group: Group{ + Targets: []model.LabelSet{ + {"__address__": "localhost:9090"}, + {"__address__": "localhost:9091"}, + }, + Labels: model.LabelSet{"foo": "bar", "bar": "baz"}, + }, expectedYaml: "targets:\n- localhost:9090\n- localhost:9091\nlabels:\n bar: baz\n foo: bar\n", expectedErr: nil, }, @@ -120,7 +123,8 @@ func TestTargetGroupYamlUnmarshal(t *testing.T) { expectedReply: nil, expectedGroup: Group{Targets: []model.LabelSet{ {"__address__": "localhost:9090"}, - {"__address__": "localhost:9191"}}, Labels: model.LabelSet{"my": "label"}}, + {"__address__": "localhost:9191"}, + }, Labels: model.LabelSet{"my": "label"}}, }, { // incorrect syntax. @@ -135,21 +139,25 @@ func TestTargetGroupYamlUnmarshal(t *testing.T) { require.Equal(t, test.expectedReply, actual) require.Equal(t, test.expectedGroup, tg) } - } func TestString(t *testing.T) { // String() should return only the source, regardless of other attributes. 
group1 := - Group{Targets: []model.LabelSet{ - {"__address__": "localhost:9090"}, - {"__address__": "localhost:9091"}}, + Group{ + Targets: []model.LabelSet{ + {"__address__": "localhost:9090"}, + {"__address__": "localhost:9091"}, + }, Source: "", - Labels: model.LabelSet{"foo": "bar", "bar": "baz"}} + Labels: model.LabelSet{"foo": "bar", "bar": "baz"}, + } group2 := - Group{Targets: []model.LabelSet{}, - Source: "", - Labels: model.LabelSet{}} + Group{ + Targets: []model.LabelSet{}, + Source: "", + Labels: model.LabelSet{}, + } require.Equal(t, "", group1.String()) require.Equal(t, "", group2.String()) require.Equal(t, group1.String(), group2.String()) diff --git a/discovery/triton/triton.go b/discovery/triton/triton.go index 96b9f39b1f..66efd9bbc1 100644 --- a/discovery/triton/triton.go +++ b/discovery/triton/triton.go @@ -190,7 +190,7 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) { default: return nil, fmt.Errorf("unknown role '%s' in configuration", d.sdConfig.Role) } - var endpoint = fmt.Sprintf(endpointFormat, d.sdConfig.Endpoint, d.sdConfig.Port, d.sdConfig.Version) + endpoint := fmt.Sprintf(endpointFormat, d.sdConfig.Endpoint, d.sdConfig.Port, d.sdConfig.Version) if len(d.sdConfig.Groups) > 0 { groups := url.QueryEscape(strings.Join(d.sdConfig.Groups, ",")) endpoint = fmt.Sprintf("%s?groups=%s", endpoint, groups) diff --git a/discovery/triton/triton_test.go b/discovery/triton/triton_test.go index f83ffcc97e..ca38965322 100644 --- a/discovery/triton/triton_test.go +++ b/discovery/triton/triton_test.go @@ -135,8 +135,7 @@ func TestTritonSDRefreshNoTargets(t *testing.T) { } func TestTritonSDRefreshMultipleTargets(t *testing.T) { - var ( - dstr = `{"containers":[ + dstr := `{"containers":[ { "groups":["foo","bar","baz"], "server_uuid":"44454c4c-5000-104d-8037-b7c04f5a5131", @@ -153,7 +152,6 @@ func TestTritonSDRefreshMultipleTargets(t *testing.T) { "vm_uuid":"7b27a514-89d7-11e6-bee6-3f96f367bee7" }] }` - ) tgts := testTritonSDRefresh(t, conf, dstr) require.NotNil(t, tgts) @@ -161,9 +159,7 @@ func TestTritonSDRefreshMultipleTargets(t *testing.T) { } func TestTritonSDRefreshNoServer(t *testing.T) { - var ( - td, _ = newTritonDiscovery(conf) - ) + td, _ := newTritonDiscovery(conf) _, err := td.refresh(context.Background()) require.Error(t, err) @@ -171,9 +167,7 @@ func TestTritonSDRefreshNoServer(t *testing.T) { } func TestTritonSDRefreshCancelled(t *testing.T) { - var ( - td, _ = newTritonDiscovery(conf) - ) + td, _ := newTritonDiscovery(conf) ctx, cancel := context.WithCancel(context.Background()) cancel() @@ -183,8 +177,7 @@ func TestTritonSDRefreshCancelled(t *testing.T) { } func TestTritonSDRefreshCNsUUIDOnly(t *testing.T) { - var ( - dstr = `{"cns":[ + dstr := `{"cns":[ { "server_uuid":"44454c4c-5000-104d-8037-b7c04f5a5131" }, @@ -192,7 +185,6 @@ func TestTritonSDRefreshCNsUUIDOnly(t *testing.T) { "server_uuid":"a5894692-bd32-4ca1-908a-e2dda3c3a5e6" }] }` - ) tgts := testTritonSDRefresh(t, cnconf, dstr) require.NotNil(t, tgts) @@ -200,8 +192,7 @@ func TestTritonSDRefreshCNsUUIDOnly(t *testing.T) { } func TestTritonSDRefreshCNsWithHostname(t *testing.T) { - var ( - dstr = `{"cns":[ + dstr := `{"cns":[ { "server_uuid":"44454c4c-5000-104d-8037-b7c04f5a5131", "server_hostname": "server01" @@ -211,7 +202,6 @@ func TestTritonSDRefreshCNsWithHostname(t *testing.T) { "server_hostname": "server02" }] }` - ) tgts := testTritonSDRefresh(t, cnconf, dstr) require.NotNil(t, tgts) diff --git a/discovery/uyuni/uyuni.go b/discovery/uyuni/uyuni.go index 
a6b2b6fce0..df8efeeaf2 100644 --- a/discovery/uyuni/uyuni.go +++ b/discovery/uyuni/uyuni.go @@ -119,7 +119,6 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { *c = DefaultSDConfig type plain SDConfig err := unmarshal((*plain)(c)) - if err != nil { return err } @@ -141,7 +140,7 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { return nil } -func login(rpcclient *xmlrpc.Client, user string, pass string) (string, error) { +func login(rpcclient *xmlrpc.Client, user, pass string) (string, error) { var result string err := rpcclient.Call("auth.login", []interface{}{user, pass}, &result) return result, err @@ -151,7 +150,7 @@ func logout(rpcclient *xmlrpc.Client, token string) error { return rpcclient.Call("auth.logout", token, nil) } -func getSystemGroupsInfoOfMonitoredClients(rpcclient *xmlrpc.Client, token string, entitlement string) (map[int][]systemGroupID, error) { +func getSystemGroupsInfoOfMonitoredClients(rpcclient *xmlrpc.Client, token, entitlement string) (map[int][]systemGroupID, error) { var systemGroupsInfos []struct { SystemID int `xmlrpc:"id"` SystemGroups []systemGroupID `xmlrpc:"system_groups"` @@ -234,7 +233,6 @@ func (d *Discovery) getEndpointLabels( systemGroupIDs []systemGroupID, networkInfo networkInfo, ) model.LabelSet { - var addr, scheme string managedGroupNames := getSystemGroupNames(systemGroupIDs) addr = fmt.Sprintf("%s:%d", networkInfo.Hostname, endpoint.Port) @@ -274,7 +272,6 @@ func (d *Discovery) getTargetsForSystems( token string, entitlement string, ) ([]model.LabelSet, error) { - result := make([]model.LabelSet, 0) systemGroupIDsBySystemID, err := getSystemGroupsInfoOfMonitoredClients(rpcClient, token, entitlement) diff --git a/discovery/xds/client_test.go b/discovery/xds/client_test.go index 1c0e321d33..fda7ba3201 100644 --- a/discovery/xds/client_test.go +++ b/discovery/xds/client_test.go @@ -26,22 +26,19 @@ import ( "google.golang.org/protobuf/types/known/anypb" ) -var ( - httpResourceConf = &HTTPResourceClientConfig{ - HTTPClientConfig: config.HTTPClientConfig{ - TLSConfig: config.TLSConfig{InsecureSkipVerify: true}, - }, - ResourceType: "monitoring", - // Some known type. - ResourceTypeURL: "type.googleapis.com/envoy.service.discovery.v3.DiscoveryRequest", - Server: "http://localhost", - ClientID: "test-id", - } -) +var httpResourceConf = &HTTPResourceClientConfig{ + HTTPClientConfig: config.HTTPClientConfig{ + TLSConfig: config.TLSConfig{InsecureSkipVerify: true}, + }, + ResourceType: "monitoring", + // Some known type. 
+ ResourceTypeURL: "type.googleapis.com/envoy.service.discovery.v3.DiscoveryRequest", + Server: "http://localhost", + ClientID: "test-id", +} func urlMustParse(str string) *url.URL { parsed, err := url.Parse(str) - if err != nil { panic(err) } @@ -92,7 +89,6 @@ func TestCreateNewHTTPResourceClient(t *testing.T) { require.Equal(t, client.endpoint, "http://127.0.0.1:5000/v3/discovery:monitoring?param1=v1") require.Equal(t, client.client.Timeout, 1*time.Minute) - } func createTestHTTPResourceClient(t *testing.T, conf *HTTPResourceClientConfig, protocolVersion ProtocolVersion, responder discoveryResponder) (*HTTPResourceClient, func()) { diff --git a/discovery/xds/kuma_test.go b/discovery/xds/kuma_test.go index 0de1b986d2..b62e9a0ef9 100644 --- a/discovery/xds/kuma_test.go +++ b/discovery/xds/kuma_test.go @@ -91,7 +91,6 @@ func getKumaMadsV1DiscoveryResponse(resources ...*MonitoringAssignment) (*v3.Dis serialized := make([]*anypb.Any, len(resources)) for i, res := range resources { data, err := proto.Marshal(res) - if err != nil { return nil, err } diff --git a/documentation/examples/custom-sd/adapter-usage/main.go b/documentation/examples/custom-sd/adapter-usage/main.go index 2214a823d9..80a8f1b5ed 100644 --- a/documentation/examples/custom-sd/adapter-usage/main.go +++ b/documentation/examples/custom-sd/adapter-usage/main.go @@ -50,7 +50,7 @@ var ( tagsLabel = model.MetaLabelPrefix + "consul_tags" // serviceAddressLabel is the name of the label containing the (optional) service address. serviceAddressLabel = model.MetaLabelPrefix + "consul_service_address" - //servicePortLabel is the name of the label containing the service port. + // servicePortLabel is the name of the label containing the service port. servicePortLabel = model.MetaLabelPrefix + "consul_service_port" // serviceIDLabel is the name of the label containing the service ID. serviceIDLabel = model.MetaLabelPrefix + "consul_service_id" @@ -120,7 +120,7 @@ func (d *discovery) parseServiceNodes(resp *http.Response, name string) (*target for _, node := range nodes { // We surround the separated list with the separator as well. This way regular expressions // in relabeling rules don't have to consider tag positions. - var tags = "," + strings.Join(node.ServiceTags, ",") + "," + tags := "," + strings.Join(node.ServiceTags, ",") + "," // If the service address is not empty it should be used instead of the node address // since the service may be registered remotely through a different node. @@ -162,7 +162,6 @@ func (d *discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { for c := time.Tick(time.Duration(d.refreshInterval) * time.Second); ; { var srvs map[string][]string resp, err := http.Get(fmt.Sprintf("http://%s/v1/catalog/services", d.address)) - if err != nil { level.Error(d.logger).Log("msg", "Error getting services list", "err", err) time.Sleep(time.Duration(d.refreshInterval) * time.Second) diff --git a/documentation/examples/custom-sd/adapter/adapter.go b/documentation/examples/custom-sd/adapter/adapter.go index e355e7b714..564a4e83b5 100644 --- a/documentation/examples/custom-sd/adapter/adapter.go +++ b/documentation/examples/custom-sd/adapter/adapter.go @@ -163,7 +163,7 @@ func (a *Adapter) Run() { } // NewAdapter creates a new instance of Adapter. 
-func NewAdapter(ctx context.Context, file string, name string, d discovery.Discoverer, logger log.Logger) *Adapter { +func NewAdapter(ctx context.Context, file, name string, d discovery.Discoverer, logger log.Logger) *Adapter { return &Adapter{ ctx: ctx, disc: d, diff --git a/documentation/examples/remote_storage/remote_storage_adapter/graphite/client.go b/documentation/examples/remote_storage/remote_storage_adapter/graphite/client.go index fedea48dbc..36242a8f4d 100644 --- a/documentation/examples/remote_storage/remote_storage_adapter/graphite/client.go +++ b/documentation/examples/remote_storage/remote_storage_adapter/graphite/client.go @@ -37,7 +37,7 @@ type Client struct { } // NewClient creates a new Client. -func NewClient(logger log.Logger, address string, transport string, timeout time.Duration, prefix string) *Client { +func NewClient(logger log.Logger, address, transport string, timeout time.Duration, prefix string) *Client { if logger == nil { logger = log.NewNopLogger() } diff --git a/documentation/examples/remote_storage/remote_storage_adapter/graphite/client_test.go b/documentation/examples/remote_storage/remote_storage_adapter/graphite/client_test.go index 68844d3273..535027e076 100644 --- a/documentation/examples/remote_storage/remote_storage_adapter/graphite/client_test.go +++ b/documentation/examples/remote_storage/remote_storage_adapter/graphite/client_test.go @@ -20,13 +20,11 @@ import ( "github.com/stretchr/testify/require" ) -var ( - metric = model.Metric{ - model.MetricNameLabel: "test:metric", - "testlabel": "test:value", - "many_chars": "abc!ABC:012-3!45ö67~89./(){},=.\"\\", - } -) +var metric = model.Metric{ + model.MetricNameLabel: "test:metric", + "testlabel": "test:value", + "many_chars": "abc!ABC:012-3!45ö67~89./(){},=.\"\\", +} func TestEscape(t *testing.T) { // Can we correctly keep and escape valid chars. diff --git a/documentation/examples/remote_storage/remote_storage_adapter/influxdb/client.go b/documentation/examples/remote_storage/remote_storage_adapter/influxdb/client.go index 061c2e76dc..f0ee58c5a2 100644 --- a/documentation/examples/remote_storage/remote_storage_adapter/influxdb/client.go +++ b/documentation/examples/remote_storage/remote_storage_adapter/influxdb/client.go @@ -41,7 +41,7 @@ type Client struct { } // NewClient creates a new Client. -func NewClient(logger log.Logger, conf influx.HTTPConfig, db string, rp string) *Client { +func NewClient(logger log.Logger, conf influx.HTTPConfig, db, rp string) *Client { c, err := influx.NewHTTPClient(conf) // Currently influx.NewClient() *should* never return an error. 
if err != nil { diff --git a/documentation/examples/remote_storage/remote_storage_adapter/opentsdb/client_test.go b/documentation/examples/remote_storage/remote_storage_adapter/opentsdb/client_test.go index 51906efcef..a30448e766 100644 --- a/documentation/examples/remote_storage/remote_storage_adapter/opentsdb/client_test.go +++ b/documentation/examples/remote_storage/remote_storage_adapter/opentsdb/client_test.go @@ -21,13 +21,11 @@ import ( "github.com/stretchr/testify/require" ) -var ( - metric = model.Metric{ - model.MetricNameLabel: "test:metric", - "testlabel": "test:value", - "many_chars": "abc!ABC:012-3!45ö67~89./", - } -) +var metric = model.Metric{ + model.MetricNameLabel: "test:metric", + "testlabel": "test:value", + "many_chars": "abc!ABC:012-3!45ö67~89./", +} func TestTagsFromMetric(t *testing.T) { expected := map[string]TagValue{ diff --git a/notifier/notifier.go b/notifier/notifier.go index 19a7f0bf9a..d378beb604 100644 --- a/notifier/notifier.go +++ b/notifier/notifier.go @@ -303,7 +303,6 @@ func (n *Manager) nextBatch() []*Alert { // Run dispatches notifications continuously. func (n *Manager) Run(tsets <-chan map[string][]*targetgroup.Group) { - for { select { case <-n.ctx.Done(): diff --git a/notifier/notifier_test.go b/notifier/notifier_test.go index b04b07e947..807c6bc7f4 100644 --- a/notifier/notifier_test.go +++ b/notifier/notifier_test.go @@ -40,7 +40,7 @@ import ( ) func TestPostPath(t *testing.T) { - var cases = []struct { + cases := []struct { in, out string }{ { @@ -456,7 +456,7 @@ func TestLabelSetNotReused(t *testing.T) { } func TestReload(t *testing.T) { - var tests = []struct { + tests := []struct { in *targetgroup.Group out string }{ @@ -500,11 +500,10 @@ alerting: require.Equal(t, tt.out, res) } - } func TestDroppedAlertmanagers(t *testing.T) { - var tests = []struct { + tests := []struct { in *targetgroup.Group out string }{ diff --git a/pkg/labels/labels_test.go b/pkg/labels/labels_test.go index 57f28224e5..2d5c2ed379 100644 --- a/pkg/labels/labels_test.go +++ b/pkg/labels/labels_test.go @@ -704,7 +704,7 @@ func BenchmarkLabels_Hash(b *testing.B) { lbls: func() Labels { lbls := make(Labels, 10) for i := 0; i < len(lbls); i++ { - //Label ~50B name, 50B value. + // Label ~50B name, 50B value. lbls[i] = Label{Name: fmt.Sprintf("abcdefghijabcdefghijabcdefghijabcdefghijabcdefghij%d", i), Value: fmt.Sprintf("abcdefghijabcdefghijabcdefghijabcdefghijabcdefghij%d", i)} } return lbls diff --git a/pkg/logging/file.go b/pkg/logging/file.go index 3c0c3e3b04..6b5751b016 100644 --- a/pkg/logging/file.go +++ b/pkg/logging/file.go @@ -21,11 +21,9 @@ import ( "github.com/pkg/errors" ) -var ( - timestampFormat = log.TimestampFormat( - func() time.Time { return time.Now().UTC() }, - "2006-01-02T15:04:05.000Z07:00", - ) +var timestampFormat = log.TimestampFormat( + func() time.Time { return time.Now().UTC() }, + "2006-01-02T15:04:05.000Z07:00", ) // JSONFileLogger represents a logger that writes JSON to a file. 
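[editor's aside] For readers skimming this cleanup: the next hunk shows that NewJSONFileLogger treats an empty path as "no logging requested" and returns a nil logger with a nil error, so callers must nil-check the result before use. A minimal sketch of that behavior, assuming only the constructor visible in this file's hunks and the pkg/logging import path used throughout this series:

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/pkg/logging"
)

func main() {
	// An empty path yields (nil, nil) rather than an error, so the
	// nil comparison below is required before any use of l.
	l, err := logging.NewJSONFileLogger("")
	fmt.Println(l == nil, err == nil) // true true
}
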
@@ -40,7 +38,7 @@ func NewJSONFileLogger(s string) (*JSONFileLogger, error) { return nil, nil } - f, err := os.OpenFile(s, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0666) + f, err := os.OpenFile(s, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0o666) if err != nil { return nil, errors.Wrap(err, "can't create json logger") } diff --git a/pkg/runtime/statfs_default.go b/pkg/runtime/statfs_default.go index a493a5cbb8..f850f2cd6d 100644 --- a/pkg/runtime/statfs_default.go +++ b/pkg/runtime/statfs_default.go @@ -23,7 +23,6 @@ import ( // Statfs returns the file system type (Unix only) func Statfs(path string) string { - // Types of file systems that may be returned by `statfs` fsTypes := map[int64]string{ 0xadf5: "ADFS_SUPER_MAGIC", diff --git a/pkg/runtime/statfs_linux_386.go b/pkg/runtime/statfs_linux_386.go index b45eecdd37..7494a0adf5 100644 --- a/pkg/runtime/statfs_linux_386.go +++ b/pkg/runtime/statfs_linux_386.go @@ -23,7 +23,6 @@ import ( // Statfs returns the file system type (Unix only) func Statfs(path string) string { - // Types of file systems that may be returned by `statfs` fsTypes := map[int32]string{ 0xadf5: "ADFS_SUPER_MAGIC", diff --git a/pkg/runtime/statfs_uint32.go b/pkg/runtime/statfs_uint32.go index fa10ebc967..72d670a396 100644 --- a/pkg/runtime/statfs_uint32.go +++ b/pkg/runtime/statfs_uint32.go @@ -23,7 +23,6 @@ import ( // Statfs returns the file system type (Unix only) func Statfs(path string) string { - // Types of file systems that may be returned by `statfs` fsTypes := map[uint32]string{ 0xadf5: "ADFS_SUPER_MAGIC", diff --git a/pkg/textparse/promparse_test.go b/pkg/textparse/promparse_test.go index 1676ccc138..653f0dd052 100644 --- a/pkg/textparse/promparse_test.go +++ b/pkg/textparse/promparse_test.go @@ -457,9 +457,7 @@ func BenchmarkParse(b *testing.B) { total := 0 for i := 0; i < b.N; i += promtestdataSampleCount { - var ( - decSamples = make(model.Vector, 0, 50) - ) + decSamples := make(model.Vector, 0, 50) sdec := expfmt.SampleDecoder{ Dec: expfmt.NewDecoder(bytes.NewReader(buf), expfmt.FmtText), Opts: &expfmt.DecodeOptions{ @@ -480,6 +478,7 @@ func BenchmarkParse(b *testing.B) { } } } + func BenchmarkGzip(b *testing.B) { for _, fn := range []string{"promtestdata.txt", "promtestdata.nometa.txt"} { b.Run(fn, func(b *testing.B) { diff --git a/promql/engine.go b/promql/engine.go index bfa575cac8..5be7ef3ee6 100644 --- a/promql/engine.go +++ b/promql/engine.go @@ -87,12 +87,15 @@ type ( func (e ErrQueryTimeout) Error() string { return fmt.Sprintf("query timed out in %s", string(e)) } + func (e ErrQueryCanceled) Error() string { return fmt.Sprintf("query was canceled in %s", string(e)) } + func (e ErrTooManySamples) Error() string { return fmt.Sprintf("query processing would load too many samples into memory in %s", string(e)) } + func (e ErrStorage) Error() string { return e.Err.Error() } @@ -402,8 +405,10 @@ func (ng *Engine) newQuery(q storage.Queryable, expr parser.Expr, start, end tim return qry, nil } -var ErrValidationAtModifierDisabled = errors.New("@ modifier is disabled") -var ErrValidationNegativeOffsetDisabled = errors.New("negative offset is disabled") +var ( + ErrValidationAtModifierDisabled = errors.New("@ modifier is disabled") + ErrValidationNegativeOffsetDisabled = errors.New("negative offset is disabled") +) func (ng *Engine) validateOpts(expr parser.Expr) error { if ng.enableAtModifier && ng.enableNegativeOffset { @@ -2139,7 +2144,6 @@ type groupedAggregation struct { // aggregation evaluates an aggregation operation on a Vector. 
The provided grouping labels // must be sorted. func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without bool, param interface{}, vec Vector, seriesHelper []EvalSeriesHelper, enh *EvalNodeHelper) Vector { - result := map[uint64]*groupedAggregation{} orderedResult := []*groupedAggregation{} var k int64 @@ -2509,7 +2513,6 @@ func preprocessExprHelper(expr parser.Expr, start, end time.Time) bool { } if isStepInvariant { - // The function and all arguments are step invariant. return true } @@ -2559,7 +2562,6 @@ func newStepInvariantExpr(expr parser.Expr) parser.Expr { // Wrapping the inside of () makes it easy to unwrap the paren later. // But this effectively unwraps the paren. return newStepInvariantExpr(e.Expr) - } return &parser.StepInvariantExpr{Expr: expr} } diff --git a/promql/engine_test.go b/promql/engine_test.go index 7ac6ae1d2f..0986a22e5d 100644 --- a/promql/engine_test.go +++ b/promql/engine_test.go @@ -181,9 +181,11 @@ type errQuerier struct { func (q *errQuerier) Select(bool, *storage.SelectHints, ...*labels.Matcher) storage.SeriesSet { return errSeriesSet{err: q.err} } + func (*errQuerier) LabelValues(string, ...*labels.Matcher) ([]string, storage.Warnings, error) { return nil, nil, nil } + func (*errQuerier) LabelNames(...*labels.Matcher) ([]string, storage.Warnings, error) { return nil, nil, nil } @@ -267,277 +269,278 @@ func TestSelectHintsSetCorrectly(t *testing.T) { // TODO(bwplotka): Add support for better hints when subquerying. expected []*storage.SelectHints - }{{ - query: "foo", start: 10000, - expected: []*storage.SelectHints{ - {Start: 5000, End: 10000}, - }, - }, { - query: "foo @ 15", start: 10000, - expected: []*storage.SelectHints{ - {Start: 10000, End: 15000}, - }, - }, { - query: "foo @ 1", start: 10000, - expected: []*storage.SelectHints{ - {Start: -4000, End: 1000}, - }, - }, { - query: "foo[2m]", start: 200000, - expected: []*storage.SelectHints{ - {Start: 80000, End: 200000, Range: 120000}, - }, - }, { - query: "foo[2m] @ 180", start: 200000, - expected: []*storage.SelectHints{ - {Start: 60000, End: 180000, Range: 120000}, - }, - }, { - query: "foo[2m] @ 300", start: 200000, - expected: []*storage.SelectHints{ - {Start: 180000, End: 300000, Range: 120000}, - }, - }, { - query: "foo[2m] @ 60", start: 200000, - expected: []*storage.SelectHints{ - {Start: -60000, End: 60000, Range: 120000}, - }, - }, { - query: "foo[2m] offset 2m", start: 300000, - expected: []*storage.SelectHints{ - {Start: 60000, End: 180000, Range: 120000}, - }, - }, { - query: "foo[2m] @ 200 offset 2m", start: 300000, - expected: []*storage.SelectHints{ - {Start: -40000, End: 80000, Range: 120000}, - }, - }, { - query: "foo[2m:1s]", start: 300000, - expected: []*storage.SelectHints{ - {Start: 175000, End: 300000}, - }, - }, { - query: "count_over_time(foo[2m:1s])", start: 300000, - expected: []*storage.SelectHints{ - {Start: 175000, End: 300000, Func: "count_over_time"}, - }, - }, { - query: "count_over_time(foo[2m:1s] @ 300)", start: 200000, - expected: []*storage.SelectHints{ - {Start: 175000, End: 300000, Func: "count_over_time"}, - }, - }, { - query: "count_over_time(foo[2m:1s] @ 200)", start: 200000, - expected: []*storage.SelectHints{ - {Start: 75000, End: 200000, Func: "count_over_time"}, - }, - }, { - query: "count_over_time(foo[2m:1s] @ 100)", start: 200000, - expected: []*storage.SelectHints{ - {Start: -25000, End: 100000, Func: "count_over_time"}, - }, - }, { - query: "count_over_time(foo[2m:1s] offset 10s)", start: 300000, - expected: 
[]*storage.SelectHints{ - {Start: 165000, End: 290000, Func: "count_over_time"}, - }, - }, { - query: "count_over_time((foo offset 10s)[2m:1s] offset 10s)", start: 300000, - expected: []*storage.SelectHints{ - {Start: 155000, End: 280000, Func: "count_over_time"}, - }, - }, { - // When the @ is on the vector selector, the enclosing subquery parameters - // don't affect the hint ranges. - query: "count_over_time((foo @ 200 offset 10s)[2m:1s] offset 10s)", start: 300000, - expected: []*storage.SelectHints{ - {Start: 185000, End: 190000, Func: "count_over_time"}, - }, - }, { - // When the @ is on the vector selector, the enclosing subquery parameters - // don't affect the hint ranges. - query: "count_over_time((foo @ 200 offset 10s)[2m:1s] @ 100 offset 10s)", start: 300000, - expected: []*storage.SelectHints{ - {Start: 185000, End: 190000, Func: "count_over_time"}, - }, - }, { - query: "count_over_time((foo offset 10s)[2m:1s] @ 100 offset 10s)", start: 300000, - expected: []*storage.SelectHints{ - {Start: -45000, End: 80000, Func: "count_over_time"}, - }, - }, { + }{ + { + query: "foo", start: 10000, + expected: []*storage.SelectHints{ + {Start: 5000, End: 10000}, + }, + }, { + query: "foo @ 15", start: 10000, + expected: []*storage.SelectHints{ + {Start: 10000, End: 15000}, + }, + }, { + query: "foo @ 1", start: 10000, + expected: []*storage.SelectHints{ + {Start: -4000, End: 1000}, + }, + }, { + query: "foo[2m]", start: 200000, + expected: []*storage.SelectHints{ + {Start: 80000, End: 200000, Range: 120000}, + }, + }, { + query: "foo[2m] @ 180", start: 200000, + expected: []*storage.SelectHints{ + {Start: 60000, End: 180000, Range: 120000}, + }, + }, { + query: "foo[2m] @ 300", start: 200000, + expected: []*storage.SelectHints{ + {Start: 180000, End: 300000, Range: 120000}, + }, + }, { + query: "foo[2m] @ 60", start: 200000, + expected: []*storage.SelectHints{ + {Start: -60000, End: 60000, Range: 120000}, + }, + }, { + query: "foo[2m] offset 2m", start: 300000, + expected: []*storage.SelectHints{ + {Start: 60000, End: 180000, Range: 120000}, + }, + }, { + query: "foo[2m] @ 200 offset 2m", start: 300000, + expected: []*storage.SelectHints{ + {Start: -40000, End: 80000, Range: 120000}, + }, + }, { + query: "foo[2m:1s]", start: 300000, + expected: []*storage.SelectHints{ + {Start: 175000, End: 300000}, + }, + }, { + query: "count_over_time(foo[2m:1s])", start: 300000, + expected: []*storage.SelectHints{ + {Start: 175000, End: 300000, Func: "count_over_time"}, + }, + }, { + query: "count_over_time(foo[2m:1s] @ 300)", start: 200000, + expected: []*storage.SelectHints{ + {Start: 175000, End: 300000, Func: "count_over_time"}, + }, + }, { + query: "count_over_time(foo[2m:1s] @ 200)", start: 200000, + expected: []*storage.SelectHints{ + {Start: 75000, End: 200000, Func: "count_over_time"}, + }, + }, { + query: "count_over_time(foo[2m:1s] @ 100)", start: 200000, + expected: []*storage.SelectHints{ + {Start: -25000, End: 100000, Func: "count_over_time"}, + }, + }, { + query: "count_over_time(foo[2m:1s] offset 10s)", start: 300000, + expected: []*storage.SelectHints{ + {Start: 165000, End: 290000, Func: "count_over_time"}, + }, + }, { + query: "count_over_time((foo offset 10s)[2m:1s] offset 10s)", start: 300000, + expected: []*storage.SelectHints{ + {Start: 155000, End: 280000, Func: "count_over_time"}, + }, + }, { + // When the @ is on the vector selector, the enclosing subquery parameters + // don't affect the hint ranges. 
+ query: "count_over_time((foo @ 200 offset 10s)[2m:1s] offset 10s)", start: 300000, + expected: []*storage.SelectHints{ + {Start: 185000, End: 190000, Func: "count_over_time"}, + }, + }, { + // When the @ is on the vector selector, the enclosing subquery parameters + // don't affect the hint ranges. + query: "count_over_time((foo @ 200 offset 10s)[2m:1s] @ 100 offset 10s)", start: 300000, + expected: []*storage.SelectHints{ + {Start: 185000, End: 190000, Func: "count_over_time"}, + }, + }, { + query: "count_over_time((foo offset 10s)[2m:1s] @ 100 offset 10s)", start: 300000, + expected: []*storage.SelectHints{ + {Start: -45000, End: 80000, Func: "count_over_time"}, + }, + }, { - query: "foo", start: 10000, end: 20000, - expected: []*storage.SelectHints{ - {Start: 5000, End: 20000, Step: 1000}, + query: "foo", start: 10000, end: 20000, + expected: []*storage.SelectHints{ + {Start: 5000, End: 20000, Step: 1000}, + }, + }, { + query: "foo @ 15", start: 10000, end: 20000, + expected: []*storage.SelectHints{ + {Start: 10000, End: 15000, Step: 1000}, + }, + }, { + query: "foo @ 1", start: 10000, end: 20000, + expected: []*storage.SelectHints{ + {Start: -4000, End: 1000, Step: 1000}, + }, + }, { + query: "rate(foo[2m] @ 180)", start: 200000, end: 500000, + expected: []*storage.SelectHints{ + {Start: 60000, End: 180000, Range: 120000, Func: "rate", Step: 1000}, + }, + }, { + query: "rate(foo[2m] @ 300)", start: 200000, end: 500000, + expected: []*storage.SelectHints{ + {Start: 180000, End: 300000, Range: 120000, Func: "rate", Step: 1000}, + }, + }, { + query: "rate(foo[2m] @ 60)", start: 200000, end: 500000, + expected: []*storage.SelectHints{ + {Start: -60000, End: 60000, Range: 120000, Func: "rate", Step: 1000}, + }, + }, { + query: "rate(foo[2m])", start: 200000, end: 500000, + expected: []*storage.SelectHints{ + {Start: 80000, End: 500000, Range: 120000, Func: "rate", Step: 1000}, + }, + }, { + query: "rate(foo[2m] offset 2m)", start: 300000, end: 500000, + expected: []*storage.SelectHints{ + {Start: 60000, End: 380000, Range: 120000, Func: "rate", Step: 1000}, + }, + }, { + query: "rate(foo[2m:1s])", start: 300000, end: 500000, + expected: []*storage.SelectHints{ + {Start: 175000, End: 500000, Func: "rate", Step: 1000}, + }, + }, { + query: "count_over_time(foo[2m:1s])", start: 300000, end: 500000, + expected: []*storage.SelectHints{ + {Start: 175000, End: 500000, Func: "count_over_time", Step: 1000}, + }, + }, { + query: "count_over_time(foo[2m:1s] offset 10s)", start: 300000, end: 500000, + expected: []*storage.SelectHints{ + {Start: 165000, End: 490000, Func: "count_over_time", Step: 1000}, + }, + }, { + query: "count_over_time(foo[2m:1s] @ 300)", start: 200000, end: 500000, + expected: []*storage.SelectHints{ + {Start: 175000, End: 300000, Func: "count_over_time", Step: 1000}, + }, + }, { + query: "count_over_time(foo[2m:1s] @ 200)", start: 200000, end: 500000, + expected: []*storage.SelectHints{ + {Start: 75000, End: 200000, Func: "count_over_time", Step: 1000}, + }, + }, { + query: "count_over_time(foo[2m:1s] @ 100)", start: 200000, end: 500000, + expected: []*storage.SelectHints{ + {Start: -25000, End: 100000, Func: "count_over_time", Step: 1000}, + }, + }, { + query: "count_over_time((foo offset 10s)[2m:1s] offset 10s)", start: 300000, end: 500000, + expected: []*storage.SelectHints{ + {Start: 155000, End: 480000, Func: "count_over_time", Step: 1000}, + }, + }, { + // When the @ is on the vector selector, the enclosing subquery parameters + // don't affect the hint ranges. 
+ query: "count_over_time((foo @ 200 offset 10s)[2m:1s] offset 10s)", start: 300000, end: 500000, + expected: []*storage.SelectHints{ + {Start: 185000, End: 190000, Func: "count_over_time", Step: 1000}, + }, + }, { + // When the @ is on the vector selector, the enclosing subquery parameters + // don't affect the hint ranges. + query: "count_over_time((foo @ 200 offset 10s)[2m:1s] @ 100 offset 10s)", start: 300000, end: 500000, + expected: []*storage.SelectHints{ + {Start: 185000, End: 190000, Func: "count_over_time", Step: 1000}, + }, + }, { + query: "count_over_time((foo offset 10s)[2m:1s] @ 100 offset 10s)", start: 300000, end: 500000, + expected: []*storage.SelectHints{ + {Start: -45000, End: 80000, Func: "count_over_time", Step: 1000}, + }, + }, { + query: "sum by (dim1) (foo)", start: 10000, + expected: []*storage.SelectHints{ + {Start: 5000, End: 10000, Func: "sum", By: true, Grouping: []string{"dim1"}}, + }, + }, { + query: "sum without (dim1) (foo)", start: 10000, + expected: []*storage.SelectHints{ + {Start: 5000, End: 10000, Func: "sum", Grouping: []string{"dim1"}}, + }, + }, { + query: "sum by (dim1) (avg_over_time(foo[1s]))", start: 10000, + expected: []*storage.SelectHints{ + {Start: 9000, End: 10000, Func: "avg_over_time", Range: 1000}, + }, + }, { + query: "sum by (dim1) (max by (dim2) (foo))", start: 10000, + expected: []*storage.SelectHints{ + {Start: 5000, End: 10000, Func: "max", By: true, Grouping: []string{"dim2"}}, + }, + }, { + query: "(max by (dim1) (foo))[5s:1s]", start: 10000, + expected: []*storage.SelectHints{ + {Start: 0, End: 10000, Func: "max", By: true, Grouping: []string{"dim1"}}, + }, + }, { + query: "(sum(http_requests{group=~\"p.*\"})+max(http_requests{group=~\"c.*\"}))[20s:5s]", start: 120000, + expected: []*storage.SelectHints{ + {Start: 95000, End: 120000, Func: "sum", By: true}, + {Start: 95000, End: 120000, Func: "max", By: true}, + }, + }, { + query: "foo @ 50 + bar @ 250 + baz @ 900", start: 100000, end: 500000, + expected: []*storage.SelectHints{ + {Start: 45000, End: 50000, Step: 1000}, + {Start: 245000, End: 250000, Step: 1000}, + {Start: 895000, End: 900000, Step: 1000}, + }, + }, { + query: "foo @ 50 + bar + baz @ 900", start: 100000, end: 500000, + expected: []*storage.SelectHints{ + {Start: 45000, End: 50000, Step: 1000}, + {Start: 95000, End: 500000, Step: 1000}, + {Start: 895000, End: 900000, Step: 1000}, + }, + }, { + query: "rate(foo[2s] @ 50) + bar @ 250 + baz @ 900", start: 100000, end: 500000, + expected: []*storage.SelectHints{ + {Start: 48000, End: 50000, Step: 1000, Func: "rate", Range: 2000}, + {Start: 245000, End: 250000, Step: 1000}, + {Start: 895000, End: 900000, Step: 1000}, + }, + }, { + query: "rate(foo[2s:1s] @ 50) + bar + baz", start: 100000, end: 500000, + expected: []*storage.SelectHints{ + {Start: 43000, End: 50000, Step: 1000, Func: "rate"}, + {Start: 95000, End: 500000, Step: 1000}, + {Start: 95000, End: 500000, Step: 1000}, + }, + }, { + query: "rate(foo[2s:1s] @ 50) + bar + rate(baz[2m:1s] @ 900 offset 2m) ", start: 100000, end: 500000, + expected: []*storage.SelectHints{ + {Start: 43000, End: 50000, Step: 1000, Func: "rate"}, + {Start: 95000, End: 500000, Step: 1000}, + {Start: 655000, End: 780000, Step: 1000, Func: "rate"}, + }, + }, { // Hints are based on the inner most subquery timestamp. 
+ query: `sum_over_time(sum_over_time(metric{job="1"}[100s])[100s:25s] @ 50)[3s:1s] @ 3000`, start: 100000, + expected: []*storage.SelectHints{ + {Start: -150000, End: 50000, Range: 100000, Func: "sum_over_time"}, + }, + }, { // Hints are based on the inner most subquery timestamp. + query: `sum_over_time(sum_over_time(metric{job="1"}[100s])[100s:25s] @ 3000)[3s:1s] @ 50`, + expected: []*storage.SelectHints{ + {Start: 2800000, End: 3000000, Range: 100000, Func: "sum_over_time"}, + }, }, - }, { - query: "foo @ 15", start: 10000, end: 20000, - expected: []*storage.SelectHints{ - {Start: 10000, End: 15000, Step: 1000}, - }, - }, { - query: "foo @ 1", start: 10000, end: 20000, - expected: []*storage.SelectHints{ - {Start: -4000, End: 1000, Step: 1000}, - }, - }, { - query: "rate(foo[2m] @ 180)", start: 200000, end: 500000, - expected: []*storage.SelectHints{ - {Start: 60000, End: 180000, Range: 120000, Func: "rate", Step: 1000}, - }, - }, { - query: "rate(foo[2m] @ 300)", start: 200000, end: 500000, - expected: []*storage.SelectHints{ - {Start: 180000, End: 300000, Range: 120000, Func: "rate", Step: 1000}, - }, - }, { - query: "rate(foo[2m] @ 60)", start: 200000, end: 500000, - expected: []*storage.SelectHints{ - {Start: -60000, End: 60000, Range: 120000, Func: "rate", Step: 1000}, - }, - }, { - query: "rate(foo[2m])", start: 200000, end: 500000, - expected: []*storage.SelectHints{ - {Start: 80000, End: 500000, Range: 120000, Func: "rate", Step: 1000}, - }, - }, { - query: "rate(foo[2m] offset 2m)", start: 300000, end: 500000, - expected: []*storage.SelectHints{ - {Start: 60000, End: 380000, Range: 120000, Func: "rate", Step: 1000}, - }, - }, { - query: "rate(foo[2m:1s])", start: 300000, end: 500000, - expected: []*storage.SelectHints{ - {Start: 175000, End: 500000, Func: "rate", Step: 1000}, - }, - }, { - query: "count_over_time(foo[2m:1s])", start: 300000, end: 500000, - expected: []*storage.SelectHints{ - {Start: 175000, End: 500000, Func: "count_over_time", Step: 1000}, - }, - }, { - query: "count_over_time(foo[2m:1s] offset 10s)", start: 300000, end: 500000, - expected: []*storage.SelectHints{ - {Start: 165000, End: 490000, Func: "count_over_time", Step: 1000}, - }, - }, { - query: "count_over_time(foo[2m:1s] @ 300)", start: 200000, end: 500000, - expected: []*storage.SelectHints{ - {Start: 175000, End: 300000, Func: "count_over_time", Step: 1000}, - }, - }, { - query: "count_over_time(foo[2m:1s] @ 200)", start: 200000, end: 500000, - expected: []*storage.SelectHints{ - {Start: 75000, End: 200000, Func: "count_over_time", Step: 1000}, - }, - }, { - query: "count_over_time(foo[2m:1s] @ 100)", start: 200000, end: 500000, - expected: []*storage.SelectHints{ - {Start: -25000, End: 100000, Func: "count_over_time", Step: 1000}, - }, - }, { - query: "count_over_time((foo offset 10s)[2m:1s] offset 10s)", start: 300000, end: 500000, - expected: []*storage.SelectHints{ - {Start: 155000, End: 480000, Func: "count_over_time", Step: 1000}, - }, - }, { - // When the @ is on the vector selector, the enclosing subquery parameters - // don't affect the hint ranges. - query: "count_over_time((foo @ 200 offset 10s)[2m:1s] offset 10s)", start: 300000, end: 500000, - expected: []*storage.SelectHints{ - {Start: 185000, End: 190000, Func: "count_over_time", Step: 1000}, - }, - }, { - // When the @ is on the vector selector, the enclosing subquery parameters - // don't affect the hint ranges. 
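The nested-subquery rows above can be reproduced by hand: as their comments say, the hints are pinned to the innermost subquery's @ timestamp, so the outer [3s:1s] window and its @ play no role. Working the first case through as a check (plain arithmetic, not engine code):

// sum_over_time(sum_over_time(metric{job="1"}[100s])[100s:25s] @ 50)[3s:1s] @ 3000
const (
	innerAtMs   = 50_000  // inner subquery pinned at @ 50
	subqRangeMs = 100_000 // the [100s:25s] subquery window
	selRangeMs  = 100_000 // the metric[100s] selector range
)

const (
	endMs   = innerAtMs                            // 50000: the pinned inner timestamp
	startMs = innerAtMs - subqRangeMs - selRangeMs // -150000: back over window plus range
)

matching the expected {Start: -150000, End: 50000, Range: 100000}.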
- query: "count_over_time((foo @ 200 offset 10s)[2m:1s] @ 100 offset 10s)", start: 300000, end: 500000, - expected: []*storage.SelectHints{ - {Start: 185000, End: 190000, Func: "count_over_time", Step: 1000}, - }, - }, { - query: "count_over_time((foo offset 10s)[2m:1s] @ 100 offset 10s)", start: 300000, end: 500000, - expected: []*storage.SelectHints{ - {Start: -45000, End: 80000, Func: "count_over_time", Step: 1000}, - }, - }, { - query: "sum by (dim1) (foo)", start: 10000, - expected: []*storage.SelectHints{ - {Start: 5000, End: 10000, Func: "sum", By: true, Grouping: []string{"dim1"}}, - }, - }, { - query: "sum without (dim1) (foo)", start: 10000, - expected: []*storage.SelectHints{ - {Start: 5000, End: 10000, Func: "sum", Grouping: []string{"dim1"}}, - }, - }, { - query: "sum by (dim1) (avg_over_time(foo[1s]))", start: 10000, - expected: []*storage.SelectHints{ - {Start: 9000, End: 10000, Func: "avg_over_time", Range: 1000}, - }, - }, { - query: "sum by (dim1) (max by (dim2) (foo))", start: 10000, - expected: []*storage.SelectHints{ - {Start: 5000, End: 10000, Func: "max", By: true, Grouping: []string{"dim2"}}, - }, - }, { - query: "(max by (dim1) (foo))[5s:1s]", start: 10000, - expected: []*storage.SelectHints{ - {Start: 0, End: 10000, Func: "max", By: true, Grouping: []string{"dim1"}}, - }, - }, { - query: "(sum(http_requests{group=~\"p.*\"})+max(http_requests{group=~\"c.*\"}))[20s:5s]", start: 120000, - expected: []*storage.SelectHints{ - {Start: 95000, End: 120000, Func: "sum", By: true}, - {Start: 95000, End: 120000, Func: "max", By: true}, - }, - }, { - query: "foo @ 50 + bar @ 250 + baz @ 900", start: 100000, end: 500000, - expected: []*storage.SelectHints{ - {Start: 45000, End: 50000, Step: 1000}, - {Start: 245000, End: 250000, Step: 1000}, - {Start: 895000, End: 900000, Step: 1000}, - }, - }, { - query: "foo @ 50 + bar + baz @ 900", start: 100000, end: 500000, - expected: []*storage.SelectHints{ - {Start: 45000, End: 50000, Step: 1000}, - {Start: 95000, End: 500000, Step: 1000}, - {Start: 895000, End: 900000, Step: 1000}, - }, - }, { - query: "rate(foo[2s] @ 50) + bar @ 250 + baz @ 900", start: 100000, end: 500000, - expected: []*storage.SelectHints{ - {Start: 48000, End: 50000, Step: 1000, Func: "rate", Range: 2000}, - {Start: 245000, End: 250000, Step: 1000}, - {Start: 895000, End: 900000, Step: 1000}, - }, - }, { - query: "rate(foo[2s:1s] @ 50) + bar + baz", start: 100000, end: 500000, - expected: []*storage.SelectHints{ - {Start: 43000, End: 50000, Step: 1000, Func: "rate"}, - {Start: 95000, End: 500000, Step: 1000}, - {Start: 95000, End: 500000, Step: 1000}, - }, - }, { - query: "rate(foo[2s:1s] @ 50) + bar + rate(baz[2m:1s] @ 900 offset 2m) ", start: 100000, end: 500000, - expected: []*storage.SelectHints{ - {Start: 43000, End: 50000, Step: 1000, Func: "rate"}, - {Start: 95000, End: 500000, Step: 1000}, - {Start: 655000, End: 780000, Step: 1000, Func: "rate"}, - }, - }, { // Hints are based on the inner most subquery timestamp. - query: `sum_over_time(sum_over_time(metric{job="1"}[100s])[100s:25s] @ 50)[3s:1s] @ 3000`, start: 100000, - expected: []*storage.SelectHints{ - {Start: -150000, End: 50000, Range: 100000, Func: "sum_over_time"}, - }, - }, { // Hints are based on the inner most subquery timestamp. 
- query: `sum_over_time(sum_over_time(metric{job="1"}[100s])[100s:25s] @ 3000)[3s:1s] @ 50`, - expected: []*storage.SelectHints{ - {Start: 2800000, End: 3000000, Range: 100000, Func: "sum_over_time"}, - }, - }, } { t.Run(tc.query, func(t *testing.T) { engine := NewEngine(opts) @@ -559,7 +562,6 @@ func TestSelectHintsSetCorrectly(t *testing.T) { require.Equal(t, tc.expected, hintsRecorder.hints) }) - } } @@ -645,25 +647,31 @@ load 10s { Query: "metric", Result: Vector{ - Sample{Point: Point{V: 1, T: 1000}, - Metric: labels.FromStrings("__name__", "metric")}, + Sample{ + Point: Point{V: 1, T: 1000}, + Metric: labels.FromStrings("__name__", "metric"), + }, }, Start: time.Unix(1, 0), }, { Query: "metric[20s]", - Result: Matrix{Series{ - Points: []Point{{V: 1, T: 0}, {V: 2, T: 10000}}, - Metric: labels.FromStrings("__name__", "metric")}, + Result: Matrix{ + Series{ + Points: []Point{{V: 1, T: 0}, {V: 2, T: 10000}}, + Metric: labels.FromStrings("__name__", "metric"), + }, }, Start: time.Unix(10, 0), }, // Range queries. { Query: "1", - Result: Matrix{Series{ - Points: []Point{{V: 1, T: 0}, {V: 1, T: 1000}, {V: 1, T: 2000}}, - Metric: labels.FromStrings()}, + Result: Matrix{ + Series{ + Points: []Point{{V: 1, T: 0}, {V: 1, T: 1000}, {V: 1, T: 2000}}, + Metric: labels.FromStrings(), + }, }, Start: time.Unix(0, 0), End: time.Unix(2, 0), @@ -671,9 +679,11 @@ load 10s }, { Query: "metric", - Result: Matrix{Series{ - Points: []Point{{V: 1, T: 0}, {V: 1, T: 1000}, {V: 1, T: 2000}}, - Metric: labels.FromStrings("__name__", "metric")}, + Result: Matrix{ + Series{ + Points: []Point{{V: 1, T: 0}, {V: 1, T: 1000}, {V: 1, T: 2000}}, + Metric: labels.FromStrings("__name__", "metric"), + }, }, Start: time.Unix(0, 0), End: time.Unix(2, 0), @@ -681,9 +691,11 @@ load 10s }, { Query: "metric", - Result: Matrix{Series{ - Points: []Point{{V: 1, T: 0}, {V: 1, T: 5000}, {V: 2, T: 10000}}, - Metric: labels.FromStrings("__name__", "metric")}, + Result: Matrix{ + Series{ + Points: []Point{{V: 1, T: 0}, {V: 1, T: 5000}, {V: 2, T: 10000}}, + Metric: labels.FromStrings("__name__", "metric"), + }, }, Start: time.Unix(0, 0), End: time.Unix(10, 0), @@ -743,23 +755,28 @@ load 10s Query: "1", MaxSamples: 1, Start: time.Unix(1, 0), - }, { + }, + { Query: "metric", MaxSamples: 1, Start: time.Unix(1, 0), - }, { + }, + { Query: "metric[20s]", MaxSamples: 2, Start: time.Unix(10, 0), - }, { + }, + { Query: "rate(metric[20s])", MaxSamples: 3, Start: time.Unix(10, 0), - }, { + }, + { Query: "metric[20s:5s]", MaxSamples: 3, Start: time.Unix(10, 0), - }, { + }, + { Query: "metric[20s] @ 10", MaxSamples: 2, Start: time.Unix(0, 0), @@ -771,38 +788,44 @@ load 10s Start: time.Unix(0, 0), End: time.Unix(2, 0), Interval: time.Second, - }, { + }, + { Query: "1", MaxSamples: 3, Start: time.Unix(0, 0), End: time.Unix(2, 0), Interval: time.Second, - }, { + }, + { Query: "metric", MaxSamples: 3, Start: time.Unix(0, 0), End: time.Unix(2, 0), Interval: time.Second, - }, { + }, + { Query: "metric", MaxSamples: 3, Start: time.Unix(0, 0), End: time.Unix(10, 0), Interval: 5 * time.Second, - }, { + }, + { Query: "rate(bigmetric[1s])", MaxSamples: 1, Start: time.Unix(0, 0), End: time.Unix(10, 0), Interval: 5 * time.Second, - }, { + }, + { // Result is duplicated, so @ also produces 3 samples. 
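The MaxSamples values in these cases track the peak number of samples held in memory at once, not the total processed; the commented cases just below spell out that accounting for the heavier subquery queries. Their first breakdown, restated as a sum (numbers taken from those comments, not computed independently):

const (
	subquerySamples = 22 // 11 per bigmetric series, both held at once
	seriesBuffer    = 11 // one series buffered while the function evaluates
	partialResults  = 2  // per-series results kept until the final add
)

const peakSamples = subquerySamples + seriesBuffer + partialResults // 35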
Query: "metric @ 10", MaxSamples: 3, Start: time.Unix(0, 0), End: time.Unix(10, 0), Interval: 5 * time.Second, - }, { + }, + { // The peak samples in memory is during the first evaluation: // - Subquery takes 22 samples, 11 for each bigmetric, // - Result is calculated per series where the series samples is buffered, hence 11 more here. @@ -814,7 +837,8 @@ load 10s Start: time.Unix(0, 0), End: time.Unix(10, 0), Interval: 5 * time.Second, - }, { + }, + { // Here the reasoning is same as above. But LHS and RHS are done one after another. // So while one of them takes 35 samples at peak, we need to hold the 2 sample // result of the other till then. @@ -823,7 +847,8 @@ load 10s Start: time.Unix(0, 0), End: time.Unix(10, 0), Interval: 5 * time.Second, - }, { + }, + { // Sample as above but with only 1 part as step invariant. // Here the peak is caused by the non-step invariant part as it touches more time range. // Hence at peak it is 2*21 (subquery from 0s to 20s) @@ -834,14 +859,16 @@ load 10s Start: time.Unix(10, 0), End: time.Unix(20, 0), Interval: 5 * time.Second, - }, { + }, + { // Nested subquery. // We saw that innermost rate takes 35 samples which is still the peak // since the other two subqueries just duplicate the result. Query: `rate(rate(bigmetric[10s:1s] @ 10)[100s:25s] @ 1000)[100s:20s] @ 2000`, MaxSamples: 35, Start: time.Unix(10, 0), - }, { + }, + { // Nested subquery. // Now the outmost subquery produces more samples than inner most rate. Query: `rate(rate(bigmetric[10s:1s] @ 10)[100s:25s] @ 1000)[17s:1s] @ 2000`, @@ -1177,9 +1204,11 @@ func TestSubquerySelector(t *testing.T) { Query: "metric[20s:10s]", Result: Result{ nil, - Matrix{Series{ - Points: []Point{{V: 1, T: 0}, {V: 2, T: 10000}}, - Metric: labels.FromStrings("__name__", "metric")}, + Matrix{ + Series{ + Points: []Point{{V: 1, T: 0}, {V: 2, T: 10000}}, + Metric: labels.FromStrings("__name__", "metric"), + }, }, nil, }, @@ -1189,9 +1218,11 @@ func TestSubquerySelector(t *testing.T) { Query: "metric[20s:5s]", Result: Result{ nil, - Matrix{Series{ - Points: []Point{{V: 1, T: 0}, {V: 1, T: 5000}, {V: 2, T: 10000}}, - Metric: labels.FromStrings("__name__", "metric")}, + Matrix{ + Series{ + Points: []Point{{V: 1, T: 0}, {V: 1, T: 5000}, {V: 2, T: 10000}}, + Metric: labels.FromStrings("__name__", "metric"), + }, }, nil, }, @@ -1201,9 +1232,11 @@ func TestSubquerySelector(t *testing.T) { Query: "metric[20s:5s] offset 2s", Result: Result{ nil, - Matrix{Series{ - Points: []Point{{V: 1, T: 0}, {V: 1, T: 5000}, {V: 2, T: 10000}}, - Metric: labels.FromStrings("__name__", "metric")}, + Matrix{ + Series{ + Points: []Point{{V: 1, T: 0}, {V: 1, T: 5000}, {V: 2, T: 10000}}, + Metric: labels.FromStrings("__name__", "metric"), + }, }, nil, }, @@ -1213,9 +1246,11 @@ func TestSubquerySelector(t *testing.T) { Query: "metric[20s:5s] offset 6s", Result: Result{ nil, - Matrix{Series{ - Points: []Point{{V: 1, T: 0}, {V: 1, T: 5000}, {V: 2, T: 10000}}, - Metric: labels.FromStrings("__name__", "metric")}, + Matrix{ + Series{ + Points: []Point{{V: 1, T: 0}, {V: 1, T: 5000}, {V: 2, T: 10000}}, + Metric: labels.FromStrings("__name__", "metric"), + }, }, nil, }, @@ -1225,9 +1260,11 @@ func TestSubquerySelector(t *testing.T) { Query: "metric[20s:5s] offset 4s", Result: Result{ nil, - Matrix{Series{ - Points: []Point{{V: 2, T: 15000}, {V: 2, T: 20000}, {V: 2, T: 25000}, {V: 2, T: 30000}}, - Metric: labels.FromStrings("__name__", "metric")}, + Matrix{ + Series{ + Points: []Point{{V: 2, T: 15000}, {V: 2, T: 20000}, {V: 2, T: 25000}, {V: 2, T: 30000}}, 
+ Metric: labels.FromStrings("__name__", "metric"), + }, }, nil, }, @@ -1237,9 +1274,11 @@ func TestSubquerySelector(t *testing.T) { Query: "metric[20s:5s] offset 5s", Result: Result{ nil, - Matrix{Series{ - Points: []Point{{V: 2, T: 10000}, {V: 2, T: 15000}, {V: 2, T: 20000}, {V: 2, T: 25000}, {V: 2, T: 30000}}, - Metric: labels.FromStrings("__name__", "metric")}, + Matrix{ + Series{ + Points: []Point{{V: 2, T: 10000}, {V: 2, T: 15000}, {V: 2, T: 20000}, {V: 2, T: 25000}, {V: 2, T: 30000}}, + Metric: labels.FromStrings("__name__", "metric"), + }, }, nil, }, @@ -1249,9 +1288,11 @@ func TestSubquerySelector(t *testing.T) { Query: "metric[20s:5s] offset 6s", Result: Result{ nil, - Matrix{Series{ - Points: []Point{{V: 2, T: 10000}, {V: 2, T: 15000}, {V: 2, T: 20000}, {V: 2, T: 25000}}, - Metric: labels.FromStrings("__name__", "metric")}, + Matrix{ + Series{ + Points: []Point{{V: 2, T: 10000}, {V: 2, T: 15000}, {V: 2, T: 20000}, {V: 2, T: 25000}}, + Metric: labels.FromStrings("__name__", "metric"), + }, }, nil, }, @@ -1261,9 +1302,11 @@ func TestSubquerySelector(t *testing.T) { Query: "metric[20s:5s] offset 7s", Result: Result{ nil, - Matrix{Series{ - Points: []Point{{V: 2, T: 10000}, {V: 2, T: 15000}, {V: 2, T: 20000}, {V: 2, T: 25000}}, - Metric: labels.FromStrings("__name__", "metric")}, + Matrix{ + Series{ + Points: []Point{{V: 2, T: 10000}, {V: 2, T: 15000}, {V: 2, T: 20000}, {V: 2, T: 25000}}, + Metric: labels.FromStrings("__name__", "metric"), + }, }, nil, }, @@ -1282,9 +1325,11 @@ func TestSubquerySelector(t *testing.T) { Query: `http_requests{group=~"pro.*",instance="0"}[30s:10s]`, Result: Result{ nil, - Matrix{Series{ - Points: []Point{{V: 9990, T: 9990000}, {V: 10000, T: 10000000}, {V: 100, T: 10010000}, {V: 130, T: 10020000}}, - Metric: labels.FromStrings("__name__", "http_requests", "job", "api-server", "instance", "0", "group", "production")}, + Matrix{ + Series{ + Points: []Point{{V: 9990, T: 9990000}, {V: 10000, T: 10000000}, {V: 100, T: 10010000}, {V: 130, T: 10020000}}, + Metric: labels.FromStrings("__name__", "http_requests", "job", "api-server", "instance", "0", "group", "production"), + }, }, nil, }, @@ -1294,9 +1339,11 @@ func TestSubquerySelector(t *testing.T) { Query: `http_requests{group=~"pro.*",instance="0"}[5m:]`, Result: Result{ nil, - Matrix{Series{ - Points: []Point{{V: 9840, T: 9840000}, {V: 9900, T: 9900000}, {V: 9960, T: 9960000}, {V: 130, T: 10020000}, {V: 310, T: 10080000}}, - Metric: labels.FromStrings("__name__", "http_requests", "job", "api-server", "instance", "0", "group", "production")}, + Matrix{ + Series{ + Points: []Point{{V: 9840, T: 9840000}, {V: 9900, T: 9900000}, {V: 9960, T: 9960000}, {V: 130, T: 10020000}, {V: 310, T: 10080000}}, + Metric: labels.FromStrings("__name__", "http_requests", "job", "api-server", "instance", "0", "group", "production"), + }, }, nil, }, @@ -1306,9 +1353,11 @@ func TestSubquerySelector(t *testing.T) { Query: `http_requests{group=~"pro.*",instance="0"}[5m:] offset 20m`, Result: Result{ nil, - Matrix{Series{ - Points: []Point{{V: 8640, T: 8640000}, {V: 8700, T: 8700000}, {V: 8760, T: 8760000}, {V: 8820, T: 8820000}, {V: 8880, T: 8880000}}, - Metric: labels.FromStrings("__name__", "http_requests", "job", "api-server", "instance", "0", "group", "production")}, + Matrix{ + Series{ + Points: []Point{{V: 8640, T: 8640000}, {V: 8700, T: 8700000}, {V: 8760, T: 8760000}, {V: 8820, T: 8820000}, {V: 8880, T: 8880000}}, + Metric: labels.FromStrings("__name__", "http_requests", "job", "api-server", "instance", "0", "group", 
"production"), + }, }, nil, }, @@ -1344,9 +1393,11 @@ func TestSubquerySelector(t *testing.T) { Query: `sum(http_requests{group=~"pro.*"})[30s:10s]`, Result: Result{ nil, - Matrix{Series{ - Points: []Point{{V: 270, T: 90000}, {V: 300, T: 100000}, {V: 330, T: 110000}, {V: 360, T: 120000}}, - Metric: labels.Labels{}}, + Matrix{ + Series{ + Points: []Point{{V: 270, T: 90000}, {V: 300, T: 100000}, {V: 330, T: 110000}, {V: 360, T: 120000}}, + Metric: labels.Labels{}, + }, }, nil, }, @@ -1356,9 +1407,11 @@ func TestSubquerySelector(t *testing.T) { Query: `sum(http_requests)[40s:10s]`, Result: Result{ nil, - Matrix{Series{ - Points: []Point{{V: 800, T: 80000}, {V: 900, T: 90000}, {V: 1000, T: 100000}, {V: 1100, T: 110000}, {V: 1200, T: 120000}}, - Metric: labels.Labels{}}, + Matrix{ + Series{ + Points: []Point{{V: 800, T: 80000}, {V: 900, T: 90000}, {V: 1000, T: 100000}, {V: 1100, T: 110000}, {V: 1200, T: 120000}}, + Metric: labels.Labels{}, + }, }, nil, }, @@ -1368,9 +1421,11 @@ func TestSubquerySelector(t *testing.T) { Query: `(sum(http_requests{group=~"p.*"})+sum(http_requests{group=~"c.*"}))[20s:5s]`, Result: Result{ nil, - Matrix{Series{ - Points: []Point{{V: 1000, T: 100000}, {V: 1000, T: 105000}, {V: 1100, T: 110000}, {V: 1100, T: 115000}, {V: 1200, T: 120000}}, - Metric: labels.Labels{}}, + Matrix{ + Series{ + Points: []Point{{V: 1000, T: 100000}, {V: 1000, T: 105000}, {V: 1100, T: 110000}, {V: 1100, T: 115000}, {V: 1200, T: 120000}}, + Metric: labels.Labels{}, + }, }, nil, }, @@ -1535,7 +1590,7 @@ func TestQueryLogger_error(t *testing.T) { func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) { startTime := time.Unix(1000, 0) endTime := time.Unix(9999, 0) - var testCases = []struct { + testCases := []struct { input string // The input to be parsed. expected parser.Expr // The expected expression AST. 
}{ @@ -2337,9 +2392,11 @@ func TestRangeQuery(t *testing.T) { Load: `load 30s bar 0 1 10 100 1000`, Query: "sum_over_time(bar[30s])", - Result: Matrix{Series{ - Points: []Point{{V: 0, T: 0}, {V: 11, T: 60000}, {V: 1100, T: 120000}}, - Metric: labels.Labels{}}, + Result: Matrix{ + Series{ + Points: []Point{{V: 0, T: 0}, {V: 11, T: 60000}, {V: 1100, T: 120000}}, + Metric: labels.Labels{}, + }, }, Start: time.Unix(0, 0), End: time.Unix(120, 0), @@ -2350,9 +2407,11 @@ func TestRangeQuery(t *testing.T) { Load: `load 30s bar 0 1 10 100 1000 0 0 0 0`, Query: "sum_over_time(bar[30s])", - Result: Matrix{Series{ - Points: []Point{{V: 0, T: 0}, {V: 11, T: 60000}, {V: 1100, T: 120000}}, - Metric: labels.Labels{}}, + Result: Matrix{ + Series{ + Points: []Point{{V: 0, T: 0}, {V: 11, T: 60000}, {V: 1100, T: 120000}}, + Metric: labels.Labels{}, + }, }, Start: time.Unix(0, 0), End: time.Unix(120, 0), @@ -2363,9 +2422,11 @@ func TestRangeQuery(t *testing.T) { Load: `load 30s bar 0 1 10 100 1000 10000 100000 1000000 10000000`, Query: "sum_over_time(bar[30s])", - Result: Matrix{Series{ - Points: []Point{{V: 0, T: 0}, {V: 11, T: 60000}, {V: 1100, T: 120000}, {V: 110000, T: 180000}, {V: 11000000, T: 240000}}, - Metric: labels.Labels{}}, + Result: Matrix{ + Series{ + Points: []Point{{V: 0, T: 0}, {V: 11, T: 60000}, {V: 1100, T: 120000}, {V: 110000, T: 180000}, {V: 11000000, T: 240000}}, + Metric: labels.Labels{}, + }, }, Start: time.Unix(0, 0), End: time.Unix(240, 0), @@ -2376,9 +2437,11 @@ func TestRangeQuery(t *testing.T) { Load: `load 30s bar 5 17 42 2 7 905 51`, Query: "sum_over_time(bar[30s])", - Result: Matrix{Series{ - Points: []Point{{V: 5, T: 0}, {V: 59, T: 60000}, {V: 9, T: 120000}, {V: 956, T: 180000}}, - Metric: labels.Labels{}}, + Result: Matrix{ + Series{ + Points: []Point{{V: 5, T: 0}, {V: 59, T: 60000}, {V: 9, T: 120000}, {V: 956, T: 180000}}, + Metric: labels.Labels{}, + }, }, Start: time.Unix(0, 0), End: time.Unix(180, 0), @@ -2389,9 +2452,11 @@ func TestRangeQuery(t *testing.T) { Load: `load 30s metric 1+1x4`, Query: "metric", - Result: Matrix{Series{ - Points: []Point{{V: 1, T: 0}, {V: 3, T: 60000}, {V: 5, T: 120000}}, - Metric: labels.Labels{labels.Label{Name: "__name__", Value: "metric"}}}, + Result: Matrix{ + Series{ + Points: []Point{{V: 1, T: 0}, {V: 3, T: 60000}, {V: 5, T: 120000}}, + Metric: labels.Labels{labels.Label{Name: "__name__", Value: "metric"}}, + }, }, Start: time.Unix(0, 0), End: time.Unix(120, 0), @@ -2402,9 +2467,11 @@ func TestRangeQuery(t *testing.T) { Load: `load 30s metric 1+1x8`, Query: "metric", - Result: Matrix{Series{ - Points: []Point{{V: 1, T: 0}, {V: 3, T: 60000}, {V: 5, T: 120000}}, - Metric: labels.Labels{labels.Label{Name: "__name__", Value: "metric"}}}, + Result: Matrix{ + Series{ + Points: []Point{{V: 1, T: 0}, {V: 3, T: 60000}, {V: 5, T: 120000}}, + Metric: labels.Labels{labels.Label{Name: "__name__", Value: "metric"}}, + }, }, Start: time.Unix(0, 0), End: time.Unix(120, 0), diff --git a/promql/functions.go b/promql/functions.go index 49ff09678f..19bcc64497 100644 --- a/promql/functions.go +++ b/promql/functions.go @@ -56,7 +56,7 @@ func funcTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) // It calculates the rate (allowing for counter resets if isCounter is true), // extrapolates if the first/last sample is close to the boundary, and returns // the result as either per-second (if isRate is true) or overall. 
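The two flags that doc comment describes (collapsed to `isCounter, isRate bool` in the hunk just below) select among the rate-family behaviours. A sketch of the plausible mapping onto the PromQL functions, assuming the promql package context; the function names are real PromQL builtins, but these wrapper bodies are illustrative rather than quoted from functions.go:

// rate():     counter semantics, per-second result
// increase(): counter semantics, overall result
// delta():    gauge semantics, overall result
func funcRate(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
	return extrapolatedRate(vals, args, enh, true, true)
}

func funcIncrease(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
	return extrapolatedRate(vals, args, enh, true, false)
}

func funcDelta(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
	return extrapolatedRate(vals, args, enh, false, false)
}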
-func extrapolatedRate(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper, isCounter bool, isRate bool) Vector { +func extrapolatedRate(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper, isCounter, isRate bool) Vector { ms := args[0].(*parser.MatrixSelector) vs := ms.VectorSelector.(*parser.VectorSelector) var ( diff --git a/promql/parser/ast.go b/promql/parser/ast.go index 7762425640..e9fcdae084 100644 --- a/promql/parser/ast.go +++ b/promql/parser/ast.go @@ -407,7 +407,7 @@ type PositionRange struct { // mergeRanges is a helper function to merge the PositionRanges of two Nodes. // Note that the arguments must be in the same order as they // occur in the input string. -func mergeRanges(first Node, last Node) PositionRange { +func mergeRanges(first, last Node) PositionRange { return PositionRange{ Start: first.PositionRange().Start, End: last.PositionRange().End, @@ -426,15 +426,19 @@ func (i *Item) PositionRange() PositionRange { func (e *AggregateExpr) PositionRange() PositionRange { return e.PosRange } + func (e *BinaryExpr) PositionRange() PositionRange { return mergeRanges(e.LHS, e.RHS) } + func (e *Call) PositionRange() PositionRange { return e.PosRange } + func (e *EvalStmt) PositionRange() PositionRange { return e.Expr.PositionRange() } + func (e Expressions) PositionRange() PositionRange { if len(e) == 0 { // Position undefined. @@ -445,33 +449,40 @@ func (e Expressions) PositionRange() PositionRange { } return mergeRanges(e[0], e[len(e)-1]) } + func (e *MatrixSelector) PositionRange() PositionRange { return PositionRange{ Start: e.VectorSelector.PositionRange().Start, End: e.EndPos, } } + func (e *SubqueryExpr) PositionRange() PositionRange { return PositionRange{ Start: e.Expr.PositionRange().Start, End: e.EndPos, } } + func (e *NumberLiteral) PositionRange() PositionRange { return e.PosRange } + func (e *ParenExpr) PositionRange() PositionRange { return e.PosRange } + func (e *StringLiteral) PositionRange() PositionRange { return e.PosRange } + func (e *UnaryExpr) PositionRange() PositionRange { return PositionRange{ Start: e.StartPos, End: e.Expr.PositionRange().End, } } + func (e *VectorSelector) PositionRange() PositionRange { return e.PosRange } diff --git a/promql/parser/generated_parser.y.go b/promql/parser/generated_parser.y.go index 71614913a0..71bce62bc5 100644 --- a/promql/parser/generated_parser.y.go +++ b/promql/parser/generated_parser.y.go @@ -33,82 +33,84 @@ type yySymType struct { duration time.Duration } -const EQL = 57346 -const BLANK = 57347 -const COLON = 57348 -const COMMA = 57349 -const COMMENT = 57350 -const DURATION = 57351 -const EOF = 57352 -const ERROR = 57353 -const IDENTIFIER = 57354 -const LEFT_BRACE = 57355 -const LEFT_BRACKET = 57356 -const LEFT_PAREN = 57357 -const METRIC_IDENTIFIER = 57358 -const NUMBER = 57359 -const RIGHT_BRACE = 57360 -const RIGHT_BRACKET = 57361 -const RIGHT_PAREN = 57362 -const SEMICOLON = 57363 -const SPACE = 57364 -const STRING = 57365 -const TIMES = 57366 -const operatorsStart = 57367 -const ADD = 57368 -const DIV = 57369 -const EQLC = 57370 -const EQL_REGEX = 57371 -const GTE = 57372 -const GTR = 57373 -const LAND = 57374 -const LOR = 57375 -const LSS = 57376 -const LTE = 57377 -const LUNLESS = 57378 -const MOD = 57379 -const MUL = 57380 -const NEQ = 57381 -const NEQ_REGEX = 57382 -const POW = 57383 -const SUB = 57384 -const AT = 57385 -const ATAN2 = 57386 -const operatorsEnd = 57387 -const aggregatorsStart = 57388 -const AVG = 57389 -const BOTTOMK = 57390 -const COUNT = 57391 -const 
COUNT_VALUES = 57392 -const GROUP = 57393 -const MAX = 57394 -const MIN = 57395 -const QUANTILE = 57396 -const STDDEV = 57397 -const STDVAR = 57398 -const SUM = 57399 -const TOPK = 57400 -const aggregatorsEnd = 57401 -const keywordsStart = 57402 -const BOOL = 57403 -const BY = 57404 -const GROUP_LEFT = 57405 -const GROUP_RIGHT = 57406 -const IGNORING = 57407 -const OFFSET = 57408 -const ON = 57409 -const WITHOUT = 57410 -const keywordsEnd = 57411 -const preprocessorStart = 57412 -const START = 57413 -const END = 57414 -const preprocessorEnd = 57415 -const startSymbolsStart = 57416 -const START_METRIC = 57417 -const START_SERIES_DESCRIPTION = 57418 -const START_EXPRESSION = 57419 -const START_METRIC_SELECTOR = 57420 -const startSymbolsEnd = 57421 +const ( + EQL = 57346 + BLANK = 57347 + COLON = 57348 + COMMA = 57349 + COMMENT = 57350 + DURATION = 57351 + EOF = 57352 + ERROR = 57353 + IDENTIFIER = 57354 + LEFT_BRACE = 57355 + LEFT_BRACKET = 57356 + LEFT_PAREN = 57357 + METRIC_IDENTIFIER = 57358 + NUMBER = 57359 + RIGHT_BRACE = 57360 + RIGHT_BRACKET = 57361 + RIGHT_PAREN = 57362 + SEMICOLON = 57363 + SPACE = 57364 + STRING = 57365 + TIMES = 57366 + operatorsStart = 57367 + ADD = 57368 + DIV = 57369 + EQLC = 57370 + EQL_REGEX = 57371 + GTE = 57372 + GTR = 57373 + LAND = 57374 + LOR = 57375 + LSS = 57376 + LTE = 57377 + LUNLESS = 57378 + MOD = 57379 + MUL = 57380 + NEQ = 57381 + NEQ_REGEX = 57382 + POW = 57383 + SUB = 57384 + AT = 57385 + ATAN2 = 57386 + operatorsEnd = 57387 + aggregatorsStart = 57388 + AVG = 57389 + BOTTOMK = 57390 + COUNT = 57391 + COUNT_VALUES = 57392 + GROUP = 57393 + MAX = 57394 + MIN = 57395 + QUANTILE = 57396 + STDDEV = 57397 + STDVAR = 57398 + SUM = 57399 + TOPK = 57400 + aggregatorsEnd = 57401 + keywordsStart = 57402 + BOOL = 57403 + BY = 57404 + GROUP_LEFT = 57405 + GROUP_RIGHT = 57406 + IGNORING = 57407 + OFFSET = 57408 + ON = 57409 + WITHOUT = 57410 + keywordsEnd = 57411 + preprocessorStart = 57412 + START = 57413 + END = 57414 + preprocessorEnd = 57415 + startSymbolsStart = 57416 + START_METRIC = 57417 + START_SERIES_DESCRIPTION = 57418 + START_EXPRESSION = 57419 + START_METRIC_SELECTOR = 57420 + startSymbolsEnd = 57421 +) var yyToknames = [...]string{ "$end", @@ -194,9 +196,11 @@ var yyToknames = [...]string{ var yyStatenames = [...]string{} -const yyEofCode = 1 -const yyErrCode = 2 -const yyInitialStackSize = 16 +const ( + yyEofCode = 1 + yyErrCode = 2 + yyInitialStackSize = 16 +) //line promql/parser/generated_parser.y:749 diff --git a/promql/parser/lex_test.go b/promql/parser/lex_test.go index 95f4d978da..8e22f41d9f 100644 --- a/promql/parser/lex_test.go +++ b/promql/parser/lex_test.go @@ -318,25 +318,32 @@ var tests = []struct { { input: "offset", expected: []Item{{OFFSET, 0, "offset"}}, - }, { + }, + { input: "by", expected: []Item{{BY, 0, "by"}}, - }, { + }, + { input: "without", expected: []Item{{WITHOUT, 0, "without"}}, - }, { + }, + { input: "on", expected: []Item{{ON, 0, "on"}}, - }, { + }, + { input: "ignoring", expected: []Item{{IGNORING, 0, "ignoring"}}, - }, { + }, + { input: "group_left", expected: []Item{{GROUP_LEFT, 0, "group_left"}}, - }, { + }, + { input: "group_right", expected: []Item{{GROUP_RIGHT, 0, "group_right"}}, - }, { + }, + { input: "bool", expected: []Item{{BOOL, 0, "bool"}}, }, @@ -569,7 +576,8 @@ var tests = []struct { {DURATION, 24, `4s`}, {RIGHT_BRACKET, 26, `]`}, }, - }, { + }, + { input: `test:name{on!~"b:ar"}[4m:4s]`, expected: []Item{ {METRIC_IDENTIFIER, 0, `test:name`}, @@ -584,7 +592,8 @@ var tests = []struct { {DURATION, 
25, `4s`}, {RIGHT_BRACKET, 27, `]`}, }, - }, { + }, + { input: `test:name{on!~"b:ar"}[4m:]`, expected: []Item{ {METRIC_IDENTIFIER, 0, `test:name`}, @@ -598,7 +607,8 @@ var tests = []struct { {COLON, 24, `:`}, {RIGHT_BRACKET, 25, `]`}, }, - }, { // Nested Subquery. + }, + { // Nested Subquery. input: `min_over_time(rate(foo{bar="baz"}[2s])[5m:])[4m:3s]`, expected: []Item{ @@ -646,7 +656,8 @@ var tests = []struct { {OFFSET, 29, "offset"}, {DURATION, 36, "10m"}, }, - }, { + }, + { input: `min_over_time(rate(foo{bar="baz"}[2s])[5m:] offset 6m)[4m:3s]`, expected: []Item{ @@ -737,7 +748,6 @@ func TestLexer(t *testing.T) { if item.Typ == ERROR { hasError = true } - } if !hasError { t.Logf("%d: input %q", i, test.input) diff --git a/promql/parser/parse.go b/promql/parser/parse.go index edecfc0e9b..2c44c3c2cf 100644 --- a/promql/parser/parse.go +++ b/promql/parser/parse.go @@ -241,7 +241,7 @@ func (p *parser) addParseErr(positionRange PositionRange, err error) { // unexpected creates a parser error complaining about an unexpected lexer item. // The item that is presented as unexpected is always the last item produced // by the lexer. -func (p *parser) unexpected(context string, expected string) { +func (p *parser) unexpected(context, expected string) { var errMsg strings.Builder // Do not report lexer errors twice @@ -354,7 +354,8 @@ func (p *parser) InjectItem(typ ItemType) { p.inject = typ p.injecting = true } -func (p *parser) newBinaryExpression(lhs Node, op Item, modifiers Node, rhs Node) *BinaryExpr { + +func (p *parser) newBinaryExpression(lhs Node, op Item, modifiers, rhs Node) *BinaryExpr { ret := modifiers.(*BinaryExpr) ret.LHS = lhs.(Expr) @@ -374,7 +375,7 @@ func (p *parser) assembleVectorSelector(vs *VectorSelector) { } } -func (p *parser) newAggregateExpr(op Item, modifier Node, args Node) (ret *AggregateExpr) { +func (p *parser) newAggregateExpr(op Item, modifier, args Node) (ret *AggregateExpr) { ret = modifier.(*AggregateExpr) arguments := args.(Expressions) @@ -650,10 +651,9 @@ func (p *parser) parseGenerated(startSymbol ItemType) interface{} { p.yyParser.Parse(p) return p.generatedParserResult - } -func (p *parser) newLabelMatcher(label Item, operator Item, value Item) *labels.Matcher { +func (p *parser) newLabelMatcher(label, operator, value Item) *labels.Matcher { op := operator.Typ val := p.unquoteString(value.Val) diff --git a/promql/parser/parse_test.go b/promql/parser/parse_test.go index b73dfd22a9..99cc924e04 100644 --- a/promql/parser/parse_test.go +++ b/promql/parser/parse_test.go @@ -40,73 +40,85 @@ var testExpr = []struct { Val: 1, PosRange: PositionRange{Start: 0, End: 1}, }, - }, { + }, + { input: "+Inf", expected: &NumberLiteral{ Val: math.Inf(1), PosRange: PositionRange{Start: 0, End: 4}, }, - }, { + }, + { input: "-Inf", expected: &NumberLiteral{ Val: math.Inf(-1), PosRange: PositionRange{Start: 0, End: 4}, }, - }, { + }, + { input: ".5", expected: &NumberLiteral{ Val: 0.5, PosRange: PositionRange{Start: 0, End: 2}, }, - }, { + }, + { input: "5.", expected: &NumberLiteral{ Val: 5, PosRange: PositionRange{Start: 0, End: 2}, }, - }, { + }, + { input: "123.4567", expected: &NumberLiteral{ Val: 123.4567, PosRange: PositionRange{Start: 0, End: 8}, }, - }, { + }, + { input: "5e-3", expected: &NumberLiteral{ Val: 0.005, PosRange: PositionRange{Start: 0, End: 4}, }, - }, { + }, + { input: "5e3", expected: &NumberLiteral{ Val: 5000, PosRange: PositionRange{Start: 0, End: 3}, }, - }, { + }, + { input: "0xc", expected: &NumberLiteral{ Val: 12, PosRange: PositionRange{Start: 
0, End: 3}, }, - }, { + }, + { input: "0755", expected: &NumberLiteral{ Val: 493, PosRange: PositionRange{Start: 0, End: 4}, }, - }, { + }, + { input: "+5.5e-3", expected: &NumberLiteral{ Val: 0.0055, PosRange: PositionRange{Start: 0, End: 7}, }, - }, { + }, + { input: "-0755", expected: &NumberLiteral{ Val: -493, PosRange: PositionRange{Start: 0, End: 5}, }, - }, { + }, + { input: "1 + 1", expected: &BinaryExpr{ Op: ADD, @@ -119,7 +131,8 @@ var testExpr = []struct { PosRange: PositionRange{Start: 4, End: 5}, }, }, - }, { + }, + { input: "1 - 1", expected: &BinaryExpr{ Op: SUB, @@ -132,7 +145,8 @@ var testExpr = []struct { PosRange: PositionRange{Start: 4, End: 5}, }, }, - }, { + }, + { input: "1 * 1", expected: &BinaryExpr{ Op: MUL, @@ -145,7 +159,8 @@ var testExpr = []struct { PosRange: PositionRange{Start: 4, End: 5}, }, }, - }, { + }, + { input: "1 % 1", expected: &BinaryExpr{ Op: MOD, @@ -158,7 +173,8 @@ var testExpr = []struct { PosRange: PositionRange{Start: 4, End: 5}, }, }, - }, { + }, + { input: "1 / 1", expected: &BinaryExpr{ Op: DIV, @@ -171,7 +187,8 @@ var testExpr = []struct { PosRange: PositionRange{Start: 4, End: 5}, }, }, - }, { + }, + { input: "1 == bool 1", expected: &BinaryExpr{ Op: EQLC, @@ -185,7 +202,8 @@ var testExpr = []struct { }, ReturnBool: true, }, - }, { + }, + { input: "1 != bool 1", expected: &BinaryExpr{ Op: NEQ, @@ -199,7 +217,8 @@ var testExpr = []struct { }, ReturnBool: true, }, - }, { + }, + { input: "1 > bool 1", expected: &BinaryExpr{ Op: GTR, @@ -213,7 +232,8 @@ var testExpr = []struct { }, ReturnBool: true, }, - }, { + }, + { input: "1 >= bool 1", expected: &BinaryExpr{ Op: GTE, @@ -227,7 +247,8 @@ var testExpr = []struct { }, ReturnBool: true, }, - }, { + }, + { input: "1 < bool 1", expected: &BinaryExpr{ Op: LSS, @@ -241,7 +262,8 @@ var testExpr = []struct { }, ReturnBool: true, }, - }, { + }, + { input: "1 <= bool 1", expected: &BinaryExpr{ Op: LTE, @@ -255,7 +277,8 @@ var testExpr = []struct { }, ReturnBool: true, }, - }, { + }, + { input: "-1^2", expected: &UnaryExpr{ Op: SUB, @@ -271,7 +294,8 @@ var testExpr = []struct { }, }, }, - }, { + }, + { input: "-1*2", expected: &BinaryExpr{ Op: MUL, @@ -284,7 +308,8 @@ var testExpr = []struct { PosRange: PositionRange{Start: 3, End: 4}, }, }, - }, { + }, + { input: "-1+2", expected: &BinaryExpr{ Op: ADD, @@ -297,7 +322,8 @@ var testExpr = []struct { PosRange: PositionRange{Start: 3, End: 4}, }, }, - }, { + }, + { input: "-1^-2", expected: &UnaryExpr{ Op: SUB, @@ -313,7 +339,8 @@ var testExpr = []struct { }, }, }, - }, { + }, + { input: "+1 + -2 * 1", expected: &BinaryExpr{ Op: ADD, @@ -333,7 +360,8 @@ var testExpr = []struct { }, }, }, - }, { + }, + { input: "1 + 2/(3*1)", expected: &BinaryExpr{ Op: ADD, @@ -363,7 +391,8 @@ var testExpr = []struct { }, }, }, - }, { + }, + { input: "1 < bool 2 - 1 * 2", expected: &BinaryExpr{ Op: LSS, @@ -391,7 +420,8 @@ var testExpr = []struct { }, }, }, - }, { + }, + { input: "-some_metric", expected: &UnaryExpr{ Op: SUB, @@ -406,7 +436,8 @@ var testExpr = []struct { }, }, }, - }, { + }, + { input: "+some_metric", expected: &UnaryExpr{ Op: ADD, @@ -421,7 +452,8 @@ var testExpr = []struct { }, }, }, - }, { + }, + { input: " +some_metric", expected: &UnaryExpr{ Op: ADD, @@ -437,103 +469,128 @@ var testExpr = []struct { }, StartPos: 1, }, - }, { + }, + { input: "", fail: true, errMsg: "no expression found in input", - }, { + }, + { input: "# just a comment\n\n", fail: true, errMsg: "no expression found in input", - }, { + }, + { input: "1+", fail: true, errMsg: 
"unexpected end of input", - }, { + }, + { input: ".", fail: true, errMsg: "unexpected character: '.'", - }, { + }, + { input: "2.5.", fail: true, errMsg: "unexpected character: '.'", - }, { + }, + { input: "100..4", fail: true, errMsg: `unexpected number ".4"`, - }, { + }, + { input: "0deadbeef", fail: true, errMsg: "bad number or duration syntax: \"0de\"", - }, { + }, + { input: "1 /", fail: true, errMsg: "unexpected end of input", - }, { + }, + { input: "*1", fail: true, errMsg: "unexpected ", - }, { + }, + { input: "(1))", fail: true, errMsg: "unexpected right parenthesis ')'", - }, { + }, + { input: "((1)", fail: true, errMsg: "unclosed left parenthesis", - }, { + }, + { input: "999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999", fail: true, errMsg: "out of range", - }, { + }, + { input: "(", fail: true, errMsg: "unclosed left parenthesis", - }, { + }, + { input: "1 and 1", fail: true, errMsg: "set operator \"and\" not allowed in binary scalar expression", - }, { + }, + { input: "1 == 1", fail: true, errMsg: "1:3: parse error: comparisons between scalars must use BOOL modifier", - }, { + }, + { input: "1 or 1", fail: true, errMsg: "set operator \"or\" not allowed in binary scalar expression", - }, { + }, + { input: "1 unless 1", fail: true, errMsg: "set operator \"unless\" not allowed in binary scalar expression", - }, { + }, + { input: "1 !~ 1", fail: true, errMsg: `unexpected character after '!': '~'`, - }, { + }, + { input: "1 =~ 1", fail: true, errMsg: `unexpected character after '=': '~'`, - }, { + }, + { input: `-"string"`, fail: true, errMsg: `unary expression only allowed on expressions of type scalar or instant vector, got "string"`, - }, { + }, + { input: `-test[5m]`, fail: true, errMsg: `unary expression only allowed on expressions of type scalar or instant vector, got "range vector"`, - }, { + }, + { input: `*test`, fail: true, errMsg: "unexpected ", - }, { + }, + { input: "1 offset 1d", fail: true, errMsg: "1:1: parse error: offset modifier must be preceded by an instant vector selector or range vector selector or a subquery", - }, { + }, + { input: "foo offset 1s offset 2s", fail: true, errMsg: "offset may not be set multiple times", - }, { + }, + { input: "a - on(b) ignoring(c) d", fail: true, errMsg: "1:11: parse error: unexpected ", @@ -565,7 +622,8 @@ var testExpr = []struct { }, VectorMatching: &VectorMatching{Card: CardOneToOne}, }, - }, { + }, + { input: "foo * sum", expected: &BinaryExpr{ Op: MUL, @@ -591,7 +649,8 @@ var testExpr = []struct { }, VectorMatching: &VectorMatching{Card: CardOneToOne}, }, - }, { + }, + { input: "foo == 1", expected: &BinaryExpr{ Op: EQLC, @@ -610,7 +669,8 @@ var testExpr = []struct { PosRange: PositionRange{Start: 7, End: 8}, }, }, - }, { + }, + { input: "foo == bool 1", expected: &BinaryExpr{ Op: EQLC, @@ -630,7 +690,8 @@ var testExpr = []struct { }, ReturnBool: true, }, - }, { + }, + { input: "2.5 / bar", expected: &BinaryExpr{ Op: DIV, @@ -649,7 +710,8 @@ var testExpr = []struct { }, }, }, - }, { + }, + { input: "foo and bar", expected: &BinaryExpr{ Op: LAND, @@ -675,7 +737,8 @@ var testExpr = []struct { }, VectorMatching: &VectorMatching{Card: CardManyToMany}, }, - }, { + }, + { input: "foo or bar", expected: &BinaryExpr{ Op: LOR, @@ -701,7 +764,8 @@ var testExpr = []struct { 
}, VectorMatching: &VectorMatching{Card: CardManyToMany}, }, - }, { + }, + { input: "foo unless bar", expected: &BinaryExpr{ Op: LUNLESS, @@ -727,7 +791,8 @@ var testExpr = []struct { }, VectorMatching: &VectorMatching{Card: CardManyToMany}, }, - }, { + }, + { // Test and/or precedence and reassigning of operands. input: "foo + bar or bla and blub", expected: &BinaryExpr{ @@ -782,7 +847,8 @@ var testExpr = []struct { }, VectorMatching: &VectorMatching{Card: CardManyToMany}, }, - }, { + }, + { // Test and/or/unless precedence. input: "foo and bar unless baz or qux", expected: &BinaryExpr{ @@ -837,7 +903,8 @@ var testExpr = []struct { }, VectorMatching: &VectorMatching{Card: CardManyToMany}, }, - }, { + }, + { // Test precedence and reassigning of operands. input: "bar + on(foo) bla / on(baz, buz) group_right(test) blub", expected: &BinaryExpr{ @@ -887,7 +954,8 @@ var testExpr = []struct { On: true, }, }, - }, { + }, + { input: "foo * on(test,blub) bar", expected: &BinaryExpr{ Op: MUL, @@ -917,7 +985,8 @@ var testExpr = []struct { On: true, }, }, - }, { + }, + { input: "foo * on(test,blub) group_left bar", expected: &BinaryExpr{ Op: MUL, @@ -947,7 +1016,8 @@ var testExpr = []struct { On: true, }, }, - }, { + }, + { input: "foo and on(test,blub) bar", expected: &BinaryExpr{ Op: LAND, @@ -977,7 +1047,8 @@ var testExpr = []struct { On: true, }, }, - }, { + }, + { input: "foo and on() bar", expected: &BinaryExpr{ Op: LAND, @@ -1007,7 +1078,8 @@ var testExpr = []struct { On: true, }, }, - }, { + }, + { input: "foo and ignoring(test,blub) bar", expected: &BinaryExpr{ Op: LAND, @@ -1036,7 +1108,8 @@ var testExpr = []struct { MatchingLabels: []string{"test", "blub"}, }, }, - }, { + }, + { input: "foo and ignoring() bar", expected: &BinaryExpr{ Op: LAND, @@ -1065,7 +1138,8 @@ var testExpr = []struct { MatchingLabels: []string{}, }, }, - }, { + }, + { input: "foo unless on(bar) baz", expected: &BinaryExpr{ Op: LUNLESS, @@ -1095,7 +1169,8 @@ var testExpr = []struct { On: true, }, }, - }, { + }, + { input: "foo / on(test,blub) group_left(bar) bar", expected: &BinaryExpr{ Op: DIV, @@ -1126,7 +1201,8 @@ var testExpr = []struct { Include: []string{"bar"}, }, }, - }, { + }, + { input: "foo / ignoring(test,blub) group_left(blub) bar", expected: &BinaryExpr{ Op: DIV, @@ -1156,7 +1232,8 @@ var testExpr = []struct { Include: []string{"blub"}, }, }, - }, { + }, + { input: "foo / ignoring(test,blub) group_left(bar) bar", expected: &BinaryExpr{ Op: DIV, @@ -1186,7 +1263,8 @@ var testExpr = []struct { Include: []string{"bar"}, }, }, - }, { + }, + { input: "foo - on(test,blub) group_right(bar,foo) bar", expected: &BinaryExpr{ Op: SUB, @@ -1217,7 +1295,8 @@ var testExpr = []struct { On: true, }, }, - }, { + }, + { input: "foo - ignoring(test,blub) group_right(bar,foo) bar", expected: &BinaryExpr{ Op: SUB, @@ -1247,79 +1326,98 @@ var testExpr = []struct { Include: []string{"bar", "foo"}, }, }, - }, { + }, + { input: "foo and 1", fail: true, errMsg: "set operator \"and\" not allowed in binary scalar expression", - }, { + }, + { input: "1 and foo", fail: true, errMsg: "set operator \"and\" not allowed in binary scalar expression", - }, { + }, + { input: "foo or 1", fail: true, errMsg: "set operator \"or\" not allowed in binary scalar expression", - }, { + }, + { input: "1 or foo", fail: true, errMsg: "set operator \"or\" not allowed in binary scalar expression", - }, { + }, + { input: "foo unless 1", fail: true, errMsg: "set operator \"unless\" not allowed in binary scalar expression", - }, { + }, + { input: "1 unless 
foo", fail: true, errMsg: "set operator \"unless\" not allowed in binary scalar expression", - }, { + }, + { input: "1 or on(bar) foo", fail: true, errMsg: "vector matching only allowed between instant vectors", - }, { + }, + { input: "foo == on(bar) 10", fail: true, errMsg: "vector matching only allowed between instant vectors", - }, { + }, + { input: "foo + group_left(baz) bar", fail: true, errMsg: "unexpected ", - }, { + }, + { input: "foo and on(bar) group_left(baz) bar", fail: true, errMsg: "no grouping allowed for \"and\" operation", - }, { + }, + { input: "foo and on(bar) group_right(baz) bar", fail: true, errMsg: "no grouping allowed for \"and\" operation", - }, { + }, + { input: "foo or on(bar) group_left(baz) bar", fail: true, errMsg: "no grouping allowed for \"or\" operation", - }, { + }, + { input: "foo or on(bar) group_right(baz) bar", fail: true, errMsg: "no grouping allowed for \"or\" operation", - }, { + }, + { input: "foo unless on(bar) group_left(baz) bar", fail: true, errMsg: "no grouping allowed for \"unless\" operation", - }, { + }, + { input: "foo unless on(bar) group_right(baz) bar", fail: true, errMsg: "no grouping allowed for \"unless\" operation", - }, { + }, + { input: `http_requests{group="production"} + on(instance) group_left(job,instance) cpu_count{type="smp"}`, fail: true, errMsg: "label \"instance\" must not occur in ON and GROUP clause at once", - }, { + }, + { input: "foo + bool bar", fail: true, errMsg: "bool modifier can only be used on comparison operators", - }, { + }, + { input: "foo + bool 10", fail: true, errMsg: "bool modifier can only be used on comparison operators", - }, { + }, + { input: "foo and bool 10", fail: true, errMsg: "bool modifier can only be used on comparison operators", @@ -1337,7 +1435,8 @@ var testExpr = []struct { End: 3, }, }, - }, { + }, + { input: "min", expected: &VectorSelector{ Name: "min", @@ -1349,7 +1448,8 @@ var testExpr = []struct { End: 3, }, }, - }, { + }, + { input: "foo offset 5m", expected: &VectorSelector{ Name: "foo", @@ -1362,7 +1462,8 @@ var testExpr = []struct { End: 13, }, }, - }, { + }, + { input: "foo offset -7m", expected: &VectorSelector{ Name: "foo", @@ -1375,7 +1476,8 @@ var testExpr = []struct { End: 14, }, }, - }, { + }, + { input: `foo OFFSET 1h30m`, expected: &VectorSelector{ Name: "foo", @@ -1388,7 +1490,8 @@ var testExpr = []struct { End: 16, }, }, - }, { + }, + { input: `foo OFFSET 1m30ms`, expected: &VectorSelector{ Name: "foo", @@ -1401,7 +1504,8 @@ var testExpr = []struct { End: 17, }, }, - }, { + }, + { input: `foo @ 1603774568`, expected: &VectorSelector{ Name: "foo", @@ -1414,7 +1518,8 @@ var testExpr = []struct { End: 16, }, }, - }, { + }, + { input: `foo @ -100`, expected: &VectorSelector{ Name: "foo", @@ -1427,7 +1532,8 @@ var testExpr = []struct { End: 10, }, }, - }, { + }, + { input: `foo @ .3`, expected: &VectorSelector{ Name: "foo", @@ -1440,7 +1546,8 @@ var testExpr = []struct { End: 8, }, }, - }, { + }, + { input: `foo @ 3.`, expected: &VectorSelector{ Name: "foo", @@ -1453,7 +1560,8 @@ var testExpr = []struct { End: 8, }, }, - }, { + }, + { input: `foo @ 3.33`, expected: &VectorSelector{ Name: "foo", @@ -1466,7 +1574,8 @@ var testExpr = []struct { End: 10, }, }, - }, { // Rounding off. + }, + { // Rounding off. input: `foo @ 3.3333`, expected: &VectorSelector{ Name: "foo", @@ -1479,7 +1588,8 @@ var testExpr = []struct { End: 12, }, }, - }, { // Rounding off. + }, + { // Rounding off. 
input: `foo @ 3.3335`, expected: &VectorSelector{ Name: "foo", @@ -1492,7 +1602,8 @@ var testExpr = []struct { End: 12, }, }, - }, { + }, + { input: `foo @ 3e2`, expected: &VectorSelector{ Name: "foo", @@ -1505,7 +1616,8 @@ var testExpr = []struct { End: 9, }, }, - }, { + }, + { input: `foo @ 3e-1`, expected: &VectorSelector{ Name: "foo", @@ -1518,7 +1630,8 @@ var testExpr = []struct { End: 10, }, }, - }, { + }, + { input: `foo @ 0xA`, expected: &VectorSelector{ Name: "foo", @@ -1531,7 +1644,8 @@ var testExpr = []struct { End: 9, }, }, - }, { + }, + { input: `foo @ -3.3e1`, expected: &VectorSelector{ Name: "foo", @@ -1544,27 +1658,33 @@ var testExpr = []struct { End: 12, }, }, - }, { + }, + { input: `foo @ +Inf`, fail: true, errMsg: "1:1: parse error: timestamp out of bounds for @ modifier: +Inf", - }, { + }, + { input: `foo @ -Inf`, fail: true, errMsg: "1:1: parse error: timestamp out of bounds for @ modifier: -Inf", - }, { + }, + { input: `foo @ NaN`, fail: true, errMsg: "1:1: parse error: timestamp out of bounds for @ modifier: NaN", - }, { + }, + { input: fmt.Sprintf(`foo @ %f`, float64(math.MaxInt64)+1), fail: true, errMsg: fmt.Sprintf("1:1: parse error: timestamp out of bounds for @ modifier: %f", float64(math.MaxInt64)+1), - }, { + }, + { input: fmt.Sprintf(`foo @ %f`, float64(math.MinInt64)-1), fail: true, errMsg: fmt.Sprintf("1:1: parse error: timestamp out of bounds for @ modifier: %f", float64(math.MinInt64)-1), - }, { + }, + { input: `foo:bar{a="bc"}`, expected: &VectorSelector{ Name: "foo:bar", @@ -1577,7 +1697,8 @@ var testExpr = []struct { End: 15, }, }, - }, { + }, + { input: `foo{NaN='bc'}`, expected: &VectorSelector{ Name: "foo", @@ -1590,7 +1711,8 @@ var testExpr = []struct { End: 13, }, }, - }, { + }, + { input: `foo{bar='}'}`, expected: &VectorSelector{ Name: "foo", @@ -1603,7 +1725,8 @@ var testExpr = []struct { End: 12, }, }, - }, { + }, + { input: `foo{a="b", foo!="bar", test=~"test", bar!~"baz"}`, expected: &VectorSelector{ Name: "foo", @@ -1619,7 +1742,8 @@ var testExpr = []struct { End: 48, }, }, - }, { + }, + { input: `foo{a="b", foo!="bar", test=~"test", bar!~"baz",}`, expected: &VectorSelector{ Name: "foo", @@ -1635,89 +1759,110 @@ var testExpr = []struct { End: 49, }, }, - }, { + }, + { input: `{`, fail: true, errMsg: "unexpected end of input inside braces", - }, { + }, + { input: `}`, fail: true, errMsg: "unexpected character: '}'", - }, { + }, + { input: `some{`, fail: true, errMsg: "unexpected end of input inside braces", - }, { + }, + { input: `some}`, fail: true, errMsg: "unexpected character: '}'", - }, { + }, + { input: `some_metric{a=b}`, fail: true, errMsg: "unexpected identifier \"b\" in label matching, expected string", - }, { + }, + { input: `some_metric{a:b="b"}`, fail: true, errMsg: "unexpected character inside braces: ':'", - }, { + }, + { input: `foo{a*"b"}`, fail: true, errMsg: "unexpected character inside braces: '*'", - }, { + }, + { input: `foo{a>="b"}`, fail: true, // TODO(fabxc): willingly lexing wrong tokens allows for more precise error // messages from the parser - consider if this is an option. 
errMsg: "unexpected character inside braces: '>'", - }, { + }, + { input: "some_metric{a=\"\xff\"}", fail: true, errMsg: "1:15: parse error: invalid UTF-8 rune", - }, { + }, + { input: `foo{gibberish}`, fail: true, errMsg: `unexpected "}" in label matching, expected label matching operator`, - }, { + }, + { input: `foo{1}`, fail: true, errMsg: "unexpected character inside braces: '1'", - }, { + }, + { input: `{}`, fail: true, errMsg: "vector selector must contain at least one non-empty matcher", - }, { + }, + { input: `{x=""}`, fail: true, errMsg: "vector selector must contain at least one non-empty matcher", - }, { + }, + { input: `{x=~".*"}`, fail: true, errMsg: "vector selector must contain at least one non-empty matcher", - }, { + }, + { input: `{x!~".+"}`, fail: true, errMsg: "vector selector must contain at least one non-empty matcher", - }, { + }, + { input: `{x!="a"}`, fail: true, errMsg: "vector selector must contain at least one non-empty matcher", - }, { + }, + { input: `foo{__name__="bar"}`, fail: true, errMsg: `metric name must not be set twice: "foo" or "bar"`, - }, { + }, + { input: `foo{__name__= =}`, fail: true, errMsg: `1:15: parse error: unexpected "=" in label matching, expected string`, - }, { + }, + { input: `foo{,}`, fail: true, errMsg: `unexpected "," in label matching, expected identifier or "}"`, - }, { + }, + { input: `foo{__name__ == "bar"}`, fail: true, errMsg: `1:15: parse error: unexpected "=" in label matching, expected string`, - }, { + }, + { input: `foo{__name__="bar" lol}`, fail: true, errMsg: `unexpected identifier "lol" in label matching, expected "," or "}"`, @@ -1739,7 +1884,8 @@ var testExpr = []struct { Range: 5 * time.Second, EndPos: 8, }, - }, { + }, + { input: "test[5m]", expected: &MatrixSelector{ VectorSelector: &VectorSelector{ @@ -1755,7 +1901,8 @@ var testExpr = []struct { Range: 5 * time.Minute, EndPos: 8, }, - }, { + }, + { input: `foo[5m30s]`, expected: &MatrixSelector{ VectorSelector: &VectorSelector{ @@ -1771,7 +1918,8 @@ var testExpr = []struct { Range: 5*time.Minute + 30*time.Second, EndPos: 10, }, - }, { + }, + { input: "test[5h] OFFSET 5m", expected: &MatrixSelector{ VectorSelector: &VectorSelector{ @@ -1788,7 +1936,8 @@ var testExpr = []struct { Range: 5 * time.Hour, EndPos: 18, }, - }, { + }, + { input: "test[5d] OFFSET 10s", expected: &MatrixSelector{ VectorSelector: &VectorSelector{ @@ -1805,7 +1954,8 @@ var testExpr = []struct { Range: 5 * 24 * time.Hour, EndPos: 19, }, - }, { + }, + { input: "test[5w] offset 2w", expected: &MatrixSelector{ VectorSelector: &VectorSelector{ @@ -1822,7 +1972,8 @@ var testExpr = []struct { Range: 5 * 7 * 24 * time.Hour, EndPos: 18, }, - }, { + }, + { input: `test{a="b"}[5y] OFFSET 3d`, expected: &MatrixSelector{ VectorSelector: &VectorSelector{ @@ -1840,7 +1991,8 @@ var testExpr = []struct { Range: 5 * 365 * 24 * time.Hour, EndPos: 25, }, - }, { + }, + { input: `test{a="b"}[5y] @ 1603774699`, expected: &MatrixSelector{ VectorSelector: &VectorSelector{ @@ -1858,70 +2010,87 @@ var testExpr = []struct { Range: 5 * 365 * 24 * time.Hour, EndPos: 28, }, - }, { + }, + { input: `foo[5mm]`, fail: true, errMsg: "bad duration syntax: \"5mm\"", - }, { + }, + { input: `foo[5m1]`, fail: true, errMsg: "bad duration syntax: \"5m1\"", - }, { + }, + { input: `foo[5m:1m1]`, fail: true, errMsg: "bad number or duration syntax: \"1m1\"", - }, { + }, + { input: `foo[5y1hs]`, fail: true, errMsg: "not a valid duration string: \"5y1hs\"", - }, { + }, + { input: `foo[5m1h]`, fail: true, errMsg: "not a valid duration 
string: \"5m1h\"", - }, { + }, + { input: `foo[5m1m]`, fail: true, errMsg: "not a valid duration string: \"5m1m\"", - }, { + }, + { input: `foo[0m]`, fail: true, errMsg: "duration must be greater than 0", - }, { + }, + { input: `foo["5m"]`, fail: true, - }, { + }, + { input: `foo[]`, fail: true, errMsg: "missing unit character in duration", - }, { + }, + { input: `foo[1]`, fail: true, errMsg: "missing unit character in duration", - }, { + }, + { input: `some_metric[5m] OFFSET 1`, fail: true, errMsg: "unexpected number \"1\" in offset, expected duration", - }, { + }, + { input: `some_metric[5m] OFFSET 1mm`, fail: true, errMsg: "bad number or duration syntax: \"1mm\"", - }, { + }, + { input: `some_metric[5m] OFFSET`, fail: true, errMsg: "unexpected end of input in offset, expected duration", - }, { + }, + { input: `some_metric OFFSET 1m[5m]`, fail: true, errMsg: "1:22: parse error: no offset modifiers allowed before range", - }, { + }, + { input: `some_metric[5m] @ 1m`, fail: true, errMsg: "1:19: parse error: unexpected duration \"1m\" in @, expected timestamp", - }, { + }, + { input: `some_metric[5m] @`, fail: true, errMsg: "1:18: parse error: unexpected end of input in @, expected timestamp", - }, { + }, + { input: `some_metric @ 1234 [5m]`, fail: true, errMsg: "1:20: parse error: no @ modifiers allowed before range", @@ -1952,7 +2121,8 @@ var testExpr = []struct { End: 25, }, }, - }, { + }, + { input: "avg by (foo)(some_metric)", expected: &AggregateExpr{ Op: AVG, @@ -1972,7 +2142,8 @@ var testExpr = []struct { End: 25, }, }, - }, { + }, + { input: "max by (foo)(some_metric)", expected: &AggregateExpr{ Op: MAX, @@ -1992,7 +2163,8 @@ var testExpr = []struct { End: 25, }, }, - }, { + }, + { input: "sum without (foo) (some_metric)", expected: &AggregateExpr{ Op: SUM, @@ -2013,7 +2185,8 @@ var testExpr = []struct { End: 31, }, }, - }, { + }, + { input: "sum (some_metric) without (foo)", expected: &AggregateExpr{ Op: SUM, @@ -2034,7 +2207,8 @@ var testExpr = []struct { End: 31, }, }, - }, { + }, + { input: "stddev(some_metric)", expected: &AggregateExpr{ Op: STDDEV, @@ -2053,7 +2227,8 @@ var testExpr = []struct { End: 19, }, }, - }, { + }, + { input: "stdvar by (foo)(some_metric)", expected: &AggregateExpr{ Op: STDVAR, @@ -2073,7 +2248,8 @@ var testExpr = []struct { End: 28, }, }, - }, { + }, + { input: "sum by ()(some_metric)", expected: &AggregateExpr{ Op: SUM, @@ -2093,7 +2269,8 @@ var testExpr = []struct { End: 22, }, }, - }, { + }, + { input: "sum by (foo,bar,)(some_metric)", expected: &AggregateExpr{ Op: SUM, @@ -2113,7 +2290,8 @@ var testExpr = []struct { End: 30, }, }, - }, { + }, + { input: "sum by (foo,)(some_metric)", expected: &AggregateExpr{ Op: SUM, @@ -2133,7 +2311,8 @@ var testExpr = []struct { End: 26, }, }, - }, { + }, + { input: "topk(5, some_metric)", expected: &AggregateExpr{ Op: TOPK, @@ -2159,7 +2338,8 @@ var testExpr = []struct { End: 20, }, }, - }, { + }, + { input: `count_values("value", some_metric)`, expected: &AggregateExpr{ Op: COUNT_VALUES, @@ -2185,7 +2365,8 @@ var testExpr = []struct { End: 34, }, }, - }, { + }, + { // Test usage of keywords as label names. 
input: "sum without(and, by, avg, count, alert, annotations)(some_metric)", expected: &AggregateExpr{ @@ -2207,67 +2388,83 @@ var testExpr = []struct { End: 65, }, }, - }, { + }, + { input: "sum without(==)(some_metric)", fail: true, errMsg: "unexpected in grouping opts, expected label", - }, { + }, + { input: "sum without(,)(some_metric)", fail: true, errMsg: `unexpected "," in grouping opts, expected label`, - }, { + }, + { input: "sum without(foo,,)(some_metric)", fail: true, errMsg: `unexpected "," in grouping opts, expected label`, - }, { + }, + { input: `sum some_metric by (test)`, fail: true, errMsg: "unexpected identifier \"some_metric\"", - }, { + }, + { input: `sum (some_metric) by test`, fail: true, errMsg: "unexpected identifier \"test\" in grouping opts", - }, { + }, + { input: `sum (some_metric) by test`, fail: true, errMsg: "unexpected identifier \"test\" in grouping opts", - }, { + }, + { input: `sum () by (test)`, fail: true, errMsg: "no arguments for aggregate expression provided", - }, { + }, + { input: "MIN keep_common (some_metric)", fail: true, errMsg: "1:5: parse error: unexpected identifier \"keep_common\"", - }, { + }, + { input: "MIN (some_metric) keep_common", fail: true, errMsg: `unexpected identifier "keep_common"`, - }, { + }, + { input: `sum (some_metric) without (test) by (test)`, fail: true, errMsg: "unexpected ", - }, { + }, + { input: `sum without (test) (some_metric) by (test)`, fail: true, errMsg: "unexpected ", - }, { + }, + { input: `topk(some_metric)`, fail: true, errMsg: "wrong number of arguments for aggregate expression provided, expected 2, got 1", - }, { + }, + { input: `topk(some_metric,)`, fail: true, errMsg: "trailing commas not allowed in function call args", - }, { + }, + { input: `topk(some_metric, other_metric)`, fail: true, errMsg: "1:6: parse error: expected type scalar in aggregation parameter, got instant vector", - }, { + }, + { input: `count_values(5, other_metric)`, fail: true, errMsg: "1:14: parse error: expected type string in aggregation parameter, got scalar", - }, { + }, + { input: `rate(some_metric[5m]) @ 1234`, fail: true, errMsg: "1:1: parse error: @ modifier must be preceded by an instant vector selector or range vector selector or a subquery", @@ -2283,7 +2480,8 @@ var testExpr = []struct { End: 6, }, }, - }, { + }, + { input: `floor(some_metric{foo!="bar"})`, expected: &Call{ Func: MustGetFunction("floor"), @@ -2305,7 +2503,8 @@ var testExpr = []struct { End: 30, }, }, - }, { + }, + { input: "rate(some_metric[5m])", expected: &Call{ Func: MustGetFunction("rate"), @@ -2330,7 +2529,8 @@ var testExpr = []struct { End: 21, }, }, - }, { + }, + { input: "round(some_metric)", expected: &Call{ Func: MustGetFunction("round"), @@ -2351,7 +2551,8 @@ var testExpr = []struct { End: 18, }, }, - }, { + }, + { input: "round(some_metric, 5)", expected: &Call{ Func: MustGetFunction("round"), @@ -2379,39 +2580,48 @@ var testExpr = []struct { End: 21, }, }, - }, { + }, + { input: "floor()", fail: true, errMsg: "expected 1 argument(s) in call to \"floor\", got 0", - }, { + }, + { input: "floor(some_metric, other_metric)", fail: true, errMsg: "expected 1 argument(s) in call to \"floor\", got 2", - }, { + }, + { input: "floor(some_metric, 1)", fail: true, errMsg: "expected 1 argument(s) in call to \"floor\", got 2", - }, { + }, + { input: "floor(1)", fail: true, errMsg: "expected type instant vector in call to function \"floor\", got scalar", - }, { + }, + { input: "hour(some_metric, some_metric, some_metric)", fail: true, errMsg: "expected at 
most 1 argument(s) in call to \"hour\", got 3", - }, { + }, + { input: "time(some_metric)", fail: true, errMsg: "expected 0 argument(s) in call to \"time\", got 1", - }, { + }, + { input: "non_existent_function_far_bar()", fail: true, errMsg: "unknown function with name \"non_existent_function_far_bar\"", - }, { + }, + { input: "rate(some_metric)", fail: true, errMsg: "expected type range vector in call to function \"rate\", got instant vector", - }, { + }, + { input: "label_replace(a, `b`, `c\xff`, `d`, `.*`)", fail: true, errMsg: "1:23: parse error: invalid UTF-8 rune", @@ -2421,28 +2631,34 @@ var testExpr = []struct { input: "-=", fail: true, errMsg: `unexpected "="`, - }, { + }, + { input: "++-++-+-+-<", fail: true, errMsg: `unexpected <op:<>`, - }, { + }, + { input: "e-+=/(0)", fail: true, errMsg: `unexpected "="`, - }, { + }, + { input: "a>b()", fail: true, errMsg: `unknown function`, - }, { + }, + { input: "rate(avg)", fail: true, errMsg: `expected type range vector`, - }, { + }, + { // This is testing that we are not re-rendering the expression string for each error, which would timeout. input: "(" + strings.Repeat("-{}-1", 10000) + ")" + strings.Repeat("[1m:]", 1000), fail: true, errMsg: `1:3: parse error: vector selector must contain at least one non-empty matcher`, - }, { + }, + { input: "sum(sum)", expected: &AggregateExpr{ Op: SUM, @@ -2461,7 +2677,8 @@ var testExpr = []struct { End: 8, }, }, - }, { + }, + { input: "a + sum", expected: &BinaryExpr{ Op: ADD, @@ -2495,49 +2712,58 @@ var testExpr = []struct { Val: "double-quoted string \" with escaped quote", PosRange: PositionRange{Start: 0, End: 44}, }, - }, { + }, + { input: `'single-quoted string \' with escaped quote'`, expected: &StringLiteral{ Val: "single-quoted string ' with escaped quote", PosRange: PositionRange{Start: 0, End: 44}, }, - }, { + }, + { input: "`backtick-quoted string`", expected: &StringLiteral{ Val: "backtick-quoted string", PosRange: PositionRange{Start: 0, End: 24}, }, - }, { + }, + { input: `"\a\b\f\n\r\t\v\\\" - \xFF\377\u1234\U00010111\U0001011111☺"`, expected: &StringLiteral{ Val: "\a\b\f\n\r\t\v\\\" - \xFF\377\u1234\U00010111\U0001011111☺", PosRange: PositionRange{Start: 0, End: 62}, }, - }, { + }, + { input: `'\a\b\f\n\r\t\v\\\' - \xFF\377\u1234\U00010111\U0001011111☺'`, expected: &StringLiteral{ Val: "\a\b\f\n\r\t\v\\' - \xFF\377\u1234\U00010111\U0001011111☺", PosRange: PositionRange{Start: 0, End: 62}, }, - }, { + }, + { input: "`" + `\a\b\f\n\r\t\v\\\"\' - \xFF\377\u1234\U00010111\U0001011111☺` + "`", expected: &StringLiteral{ Val: `\a\b\f\n\r\t\v\\\"\' - \xFF\377\u1234\U00010111\U0001011111☺`, PosRange: PositionRange{Start: 0, End: 64}, }, - }, { + }, + { input: "`\\``", fail: true, errMsg: "unterminated raw string", - }, { + }, + { input: `"\`, fail: true, errMsg: "escape sequence not terminated", - }, { + }, + { input: `"\c"`, fail: true, errMsg: "unknown escape sequence U+0063 'c'", - }, { + }, + { input: `"\x."`, fail: true, errMsg: "illegal character U+002E '.' 
in escape sequence", @@ -2580,7 +2806,8 @@ var testExpr = []struct { Step: time.Hour + 6*time.Millisecond, EndPos: 27, }, - }, { + }, + { input: `foo[10m:]`, expected: &SubqueryExpr{ Expr: &VectorSelector{ @@ -2596,7 +2823,8 @@ var testExpr = []struct { Range: 10 * time.Minute, EndPos: 9, }, - }, { + }, + { input: `min_over_time(rate(foo{bar="baz"}[2s])[5m:5s])`, expected: &Call{ Func: MustGetFunction("min_over_time"), @@ -2637,7 +2865,8 @@ var testExpr = []struct { End: 46, }, }, - }, { + }, + { input: `min_over_time(rate(foo{bar="baz"}[2s])[5m:])[4m:3s]`, expected: &SubqueryExpr{ Expr: &Call{ @@ -2681,7 +2910,8 @@ var testExpr = []struct { Step: 3 * time.Second, EndPos: 51, }, - }, { + }, + { input: `min_over_time(rate(foo{bar="baz"}[2s])[5m:] offset 4m)[4m:3s]`, expected: &SubqueryExpr{ Expr: &Call{ @@ -2726,7 +2956,8 @@ var testExpr = []struct { Step: 3 * time.Second, EndPos: 61, }, - }, { + }, + { input: `min_over_time(rate(foo{bar="baz"}[2s])[5m:] @ 1603775091)[4m:3s]`, expected: &SubqueryExpr{ Expr: &Call{ @@ -2771,7 +3002,8 @@ var testExpr = []struct { Step: 3 * time.Second, EndPos: 64, }, - }, { + }, + { input: `min_over_time(rate(foo{bar="baz"}[2s])[5m:] @ -160377509)[4m:3s]`, expected: &SubqueryExpr{ Expr: &Call{ @@ -2816,7 +3048,8 @@ var testExpr = []struct { Step: 3 * time.Second, EndPos: 64, }, - }, { + }, + { input: "sum without(and, by, avg, count, alert, annotations)(some_metric) [30m:10s]", expected: &SubqueryExpr{ Expr: &AggregateExpr{ @@ -2842,7 +3075,8 @@ var testExpr = []struct { Step: 10 * time.Second, EndPos: 75, }, - }, { + }, + { input: `some_metric OFFSET 1m [10m:5s]`, expected: &SubqueryExpr{ Expr: &VectorSelector{ @@ -2860,7 +3094,8 @@ var testExpr = []struct { Step: 5 * time.Second, EndPos: 30, }, - }, { + }, + { input: `some_metric @ 123 [10m:5s]`, expected: &SubqueryExpr{ Expr: &VectorSelector{ @@ -2878,7 +3113,8 @@ var testExpr = []struct { Step: 5 * time.Second, EndPos: 26, }, - }, { + }, + { input: `some_metric @ 123 offset 1m [10m:5s]`, expected: &SubqueryExpr{ Expr: &VectorSelector{ @@ -2897,7 +3133,8 @@ var testExpr = []struct { Step: 5 * time.Second, EndPos: 36, }, - }, { + }, + { input: `some_metric offset 1m @ 123 [10m:5s]`, expected: &SubqueryExpr{ Expr: &VectorSelector{ @@ -2916,7 +3153,8 @@ var testExpr = []struct { Step: 5 * time.Second, EndPos: 36, }, - }, { + }, + { input: `some_metric[10m:5s] offset 1m @ 123`, expected: &SubqueryExpr{ Expr: &VectorSelector{ @@ -2935,7 +3173,8 @@ var testExpr = []struct { Step: 5 * time.Second, EndPos: 35, }, - }, { + }, + { input: `(foo + bar{nm="val"})[5m:]`, expected: &SubqueryExpr{ Expr: &ParenExpr{ @@ -2974,7 +3213,8 @@ var testExpr = []struct { Range: 5 * time.Minute, EndPos: 26, }, - }, { + }, + { input: `(foo + bar{nm="val"})[5m:] offset 10m`, expected: &SubqueryExpr{ Expr: &ParenExpr{ @@ -3014,7 +3254,8 @@ var testExpr = []struct { OriginalOffset: 10 * time.Minute, EndPos: 37, }, - }, { + }, + { input: `(foo + bar{nm="val"} @ 1234)[5m:] @ 1603775019`, expected: &SubqueryExpr{ Expr: &ParenExpr{ @@ -3055,19 +3296,23 @@ var testExpr = []struct { Timestamp: makeInt64Pointer(1603775019000), EndPos: 46, }, - }, { + }, + { input: "test[5d] OFFSET 10s [10m:5s]", fail: true, errMsg: "1:1: parse error: subquery is only allowed on instant vector, got matrix", - }, { + }, + { input: `(foo + bar{nm="val"})[5m:][10m:5s]`, fail: true, errMsg: `1:1: parse error: subquery is only allowed on instant vector, got matrix`, - }, { + }, + { input: "rate(food[1m])[1h] offset 1h", fail: true, errMsg: `1:15: parse error: 
ranges only allowed for vector selectors`, - }, { + }, + { input: "rate(food[1m])[1h] @ 100", fail: true, errMsg: `1:15: parse error: ranges only allowed for vector selectors`, @@ -3086,7 +3331,8 @@ var testExpr = []struct { End: 13, }, }, - }, { + }, + { input: `foo @ end()`, expected: &VectorSelector{ Name: "foo", @@ -3099,7 +3345,8 @@ var testExpr = []struct { End: 11, }, }, - }, { + }, + { input: `test[5y] @ start()`, expected: &MatrixSelector{ VectorSelector: &VectorSelector{ @@ -3116,7 +3363,8 @@ var testExpr = []struct { Range: 5 * 365 * 24 * time.Hour, EndPos: 18, }, - }, { + }, + { input: `test[5y] @ end()`, expected: &MatrixSelector{ VectorSelector: &VectorSelector{ @@ -3133,7 +3381,8 @@ var testExpr = []struct { Range: 5 * 365 * 24 * time.Hour, EndPos: 16, }, - }, { + }, + { input: `foo[10m:6s] @ start()`, expected: &SubqueryExpr{ Expr: &VectorSelector{ @@ -3151,7 +3400,8 @@ var testExpr = []struct { StartOrEnd: START, EndPos: 21, }, - }, { + }, + { input: `foo[10m:6s] @ end()`, expected: &SubqueryExpr{ Expr: &VectorSelector{ @@ -3169,11 +3419,13 @@ var testExpr = []struct { StartOrEnd: END, EndPos: 19, }, - }, { + }, + { input: `start()`, fail: true, errMsg: `1:6: parse error: unexpected "("`, - }, { + }, + { input: `end()`, fail: true, errMsg: `1:4: parse error: unexpected "("`, @@ -3191,7 +3443,8 @@ var testExpr = []struct { End: 5, }, }, - }, { + }, + { input: `end`, expected: &VectorSelector{ Name: "end", @@ -3203,7 +3456,8 @@ var testExpr = []struct { End: 3, }, }, - }, { + }, + { input: `start{end="foo"}`, expected: &VectorSelector{ Name: "start", @@ -3216,7 +3470,8 @@ var testExpr = []struct { End: 16, }, }, - }, { + }, + { input: `end{start="foo"}`, expected: &VectorSelector{ Name: "end", @@ -3229,7 +3484,8 @@ var testExpr = []struct { End: 16, }, }, - }, { + }, + { input: `foo unless on(start) bar`, expected: &BinaryExpr{ Op: LUNLESS, @@ -3259,7 +3515,8 @@ var testExpr = []struct { On: true, }, }, - }, { + }, + { input: `foo unless on(end) bar`, expected: &BinaryExpr{ Op: LUNLESS, diff --git a/promql/query_logger.go b/promql/query_logger.go index cf2fbbfcc8..ecf93765ce 100644 --- a/promql/query_logger.go +++ b/promql/query_logger.go @@ -81,8 +81,7 @@ func logUnfinishedQueries(filename string, filesize int, logger log.Logger) { } func getMMapedFile(filename string, filesize int, logger log.Logger) ([]byte, error) { - - file, err := os.OpenFile(filename, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0666) + file, err := os.OpenFile(filename, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0o666) if err != nil { level.Error(logger).Log("msg", "Error opening query log file", "file", filename, "err", err) return nil, err @@ -104,7 +103,7 @@ func getMMapedFile(filename string, filesize int, logger log.Logger) ([]byte, er } func NewActiveQueryTracker(localStoragePath string, maxConcurrent int, logger log.Logger) *ActiveQueryTracker { - err := os.MkdirAll(localStoragePath, 0777) + err := os.MkdirAll(localStoragePath, 0o777) if err != nil { level.Error(logger).Log("msg", "Failed to create directory for logging active queries") } @@ -147,7 +146,6 @@ func trimStringByBytes(str string, size int) string { func _newJSONEntry(query string, timestamp int64, logger log.Logger) []byte { entry := Entry{query, timestamp} jsonEntry, err := json.Marshal(entry) - if err != nil { level.Error(logger).Log("msg", "Cannot create json of query", "query", query) return []byte{} diff --git a/rules/alerting.go b/rules/alerting.go index 28abcf2a5b..524d80afef 100644 --- a/rules/alerting.go +++ b/rules/alerting.go @@ -311,7 
+311,7 @@ func (r *AlertingRule) Eval(ctx context.Context, ts time.Time, query QueryFunc, resultFPs := map[uint64]struct{}{} var vec promql.Vector - var alerts = make(map[uint64]*Alert, len(res)) + alerts := make(map[uint64]*Alert, len(res)) for _, smpl := range res { // Provide the alert information to the template. l := make(map[string]string, len(smpl.Metric)) @@ -479,7 +479,7 @@ func (r *AlertingRule) ForEachActiveAlert(f func(*Alert)) { } } -func (r *AlertingRule) sendAlerts(ctx context.Context, ts time.Time, resendDelay time.Duration, interval time.Duration, notifyFunc NotifyFunc) { +func (r *AlertingRule) sendAlerts(ctx context.Context, ts time.Time, resendDelay, interval time.Duration, notifyFunc NotifyFunc) { alerts := []*Alert{} r.ForEachActiveAlert(func(alert *Alert) { if alert.needsSending(ts, resendDelay) { diff --git a/rules/manager.go b/rules/manager.go index fa8cd6763a..2ab7eb62da 100644 --- a/rules/manager.go +++ b/rules/manager.go @@ -834,12 +834,10 @@ func (g *Group) RestoreForState(ts time.Time) { level.Debug(g.logger).Log("msg", "'for' state restored", labels.AlertName, alertRule.Name(), "restored_time", a.ActiveAt.Format(time.RFC850), "labels", a.Labels.String()) - }) alertRule.SetRestored(true) } - } // Equals return if two groups are the same. diff --git a/rules/manager_test.go b/rules/manager_test.go index f3b3350706..b704098429 100644 --- a/rules/manager_test.go +++ b/rules/manager_test.go @@ -119,17 +119,19 @@ func TestAlertingRule(t *testing.T) { baseTime := time.Unix(0, 0) - var tests = []struct { + tests := []struct { time time.Duration result promql.Vector }{ { time: 0, result: result[:2], - }, { + }, + { time: 5 * time.Minute, result: result[2:], - }, { + }, + { time: 10 * time.Minute, result: result[2:3], }, @@ -256,7 +258,7 @@ func TestForStateAddSamples(t *testing.T) { baseTime := time.Unix(0, 0) - var tests = []struct { + tests := []struct { time time.Duration result promql.Vector persistThisTime bool // If true, it means this 'time' is persisted for 'for'. @@ -769,7 +771,6 @@ func TestUpdate(t *testing.T) { } else { rgs.Groups[i].Interval = model.Duration(10) } - } reloadAndValidate(rgs, t, tmpFile, ruleManager, expected, ogs) diff --git a/scrape/scrape_test.go b/scrape/scrape_test.go index 1e0c68d6d5..3b34f14165 100644 --- a/scrape/scrape_test.go +++ b/scrape/scrape_test.go @@ -109,7 +109,6 @@ func TestDroppedTargetsList(t *testing.T) { // TestDiscoveredLabelsUpdate checks that DiscoveredLabels are updated // even when new labels don't affect the target `hash`. func TestDiscoveredLabelsUpdate(t *testing.T) { - sp := &scrapePool{} // These are used when syncing so need this to avoid a panic. 
sp.config = &config.ScrapeConfig{ @@ -350,7 +349,7 @@ func TestScrapePoolTargetLimit(t *testing.T) { client: http.DefaultClient, } - var tgs = []*targetgroup.Group{} + tgs := []*targetgroup.Group{} for i := 0; i < 50; i++ { tgs = append(tgs, &targetgroup.Group{ @@ -1000,6 +999,7 @@ func BenchmarkScrapeLoopAppend(b *testing.B) { _, _, _, _ = sl.append(slApp, metrics, "", ts) } } + func BenchmarkScrapeLoopAppendOM(b *testing.B) { ctx, sl := simpleTestScrapeLoop(b) @@ -1409,8 +1409,10 @@ func TestScrapeLoopAppendForConflictingPrefixedLabels(t *testing.T) { "Two target labels collide with existing labels, both with and without prefix 'exported'": { targetLabels: []string{"foo", "3", "exported_foo", "4"}, exposedLabels: `metric{foo="1" exported_foo="2"} 0`, - expected: []string{"__name__", "metric", "exported_exported_foo", "1", "exported_exported_exported_foo", - "2", "exported_foo", "4", "foo", "3"}, + expected: []string{ + "__name__", "metric", "exported_exported_foo", "1", "exported_exported_exported_foo", + "2", "exported_foo", "4", "foo", "3", + }, }, "Extreme example": { targetLabels: []string{"foo", "0", "exported_exported_foo", "1", "exported_exported_exported_foo", "2"}, @@ -1743,7 +1745,8 @@ func TestScrapeLoopAppendExemplar(t *testing.T) { exemplars: []exemplar.Exemplar{ {Labels: labels.FromStrings("a", "abc"), Value: 1}, }, - }, { + }, + { title: "Metric with exemplars and TS", scrapeText: "metric_total{n=\"1\"} 0 # {a=\"abc\"} 1.0 10000\n# EOF", discoveryLabels: []string{"n", "2"}, @@ -1754,7 +1757,8 @@ func TestScrapeLoopAppendExemplar(t *testing.T) { exemplars: []exemplar.Exemplar{ {Labels: labels.FromStrings("a", "abc"), Value: 1, Ts: 10000000, HasTs: true}, }, - }, { + }, + { title: "Two metrics and exemplars", scrapeText: `metric_total{n="1"} 1 # {t="1"} 1.0 10000 metric_total{n="2"} 2 # {t="2"} 2.0 20000 @@ -2040,7 +2044,6 @@ func TestScrapeLoopOutOfBoundsTimeError(t *testing.T) { require.Equal(t, 1, total) require.Equal(t, 1, added) require.Equal(t, 0, seriesAdded) - } func TestTargetScraperScrapeOK(t *testing.T) { diff --git a/storage/buffer.go b/storage/buffer.go index feca1d91ed..68cda5f946 100644 --- a/storage/buffer.go +++ b/storage/buffer.go @@ -275,7 +275,7 @@ func (r *sampleRing) nthLast(n int) (int64, float64, bool) { func (r *sampleRing) samples() []sample { res := make([]sample, r.l) - var k = r.f + r.l + k := r.f + r.l var j int if k > len(r.buf) { k = len(r.buf) diff --git a/storage/merge.go b/storage/merge.go index 2a68ad96a2..49757e2c39 100644 --- a/storage/merge.go +++ b/storage/merge.go @@ -42,7 +42,7 @@ type mergeGenericQuerier struct { // See NewFanout commentary to learn more about primary vs secondary differences. // // In case of overlaps between the data given by primaries' and secondaries' Selects, merge function will be used. -func NewMergeQuerier(primaries []Querier, secondaries []Querier, mergeFn VerticalSeriesMergeFunc) Querier { +func NewMergeQuerier(primaries, secondaries []Querier, mergeFn VerticalSeriesMergeFunc) Querier { queriers := make([]genericQuerier, 0, len(primaries)+len(secondaries)) for _, q := range primaries { if _, ok := q.(noopQuerier); !ok && q != nil { @@ -71,7 +71,7 @@ func NewMergeQuerier(primaries []Querier, secondaries []Querier, mergeFn Vertica // // In case of overlaps between the data given by primaries' and secondaries' Selects, merge function will be used. // TODO(bwplotka): Currently merge will compact overlapping chunks with bigger chunk, without limit. 
Split it: https://github.com/prometheus/tsdb/issues/670 -func NewMergeChunkQuerier(primaries []ChunkQuerier, secondaries []ChunkQuerier, mergeFn VerticalChunkSeriesMergeFunc) ChunkQuerier { +func NewMergeChunkQuerier(primaries, secondaries []ChunkQuerier, mergeFn VerticalChunkSeriesMergeFunc) ChunkQuerier { queriers := make([]genericQuerier, 0, len(primaries)+len(secondaries)) for _, q := range primaries { if _, ok := q.(noopChunkQuerier); !ok && q != nil { @@ -104,7 +104,7 @@ func (q *mergeGenericQuerier) Select(sortSeries bool, hints *SelectHints, matche return q.queriers[0].Select(sortSeries, hints, matchers...) } - var seriesSets = make([]genericSeriesSet, 0, len(q.queriers)) + seriesSets := make([]genericSeriesSet, 0, len(q.queriers)) if !q.concurrentSelect { for _, querier := range q.queriers { // We need to sort for merge to work. @@ -265,7 +265,6 @@ func NewMergeSeriesSet(sets []SeriesSet, mergeFunc VerticalSeriesMergeFunc) Seri genericSets := make([]genericSeriesSet, 0, len(sets)) for _, s := range sets { genericSets = append(genericSets, &genericSeriesSetAdapter{s}) - } return &seriesSetAdapter{newGenericMergeSeriesSet(genericSets, (&seriesMergerAdapter{VerticalSeriesMergeFunc: mergeFunc}).Merge)} } @@ -281,7 +280,6 @@ func NewMergeChunkSeriesSet(sets []ChunkSeriesSet, mergeFunc VerticalChunkSeries genericSets := make([]genericSeriesSet, 0, len(sets)) for _, s := range sets { genericSets = append(genericSets, &genericChunkSeriesSetAdapter{s}) - } return &chunkSeriesSetAdapter{newGenericMergeSeriesSet(genericSets, (&chunkSeriesMergerAdapter{VerticalChunkSeriesMergeFunc: mergeFunc}).Merge)} } diff --git a/storage/merge_test.go b/storage/merge_test.go index 23eab0f70d..0aabcdb96c 100644 --- a/storage/merge_test.go +++ b/storage/merge_test.go @@ -616,7 +616,8 @@ func TestChainSampleIterator(t *testing.T) { NewListSeriesIterator(samples{sample{2, 2}, sample{5, 5}}), }, expected: []tsdbutil.Sample{ - sample{0, 0}, sample{1, 1}, sample{2, 2}, sample{3, 3}, sample{4, 4}, sample{5, 5}}, + sample{0, 0}, sample{1, 1}, sample{2, 2}, sample{3, 3}, sample{4, 4}, sample{5, 5}, + }, }, // Overlap. { diff --git a/storage/remote/queue_manager.go b/storage/remote/queue_manager.go index 9ac3b926b5..8664555a5d 100644 --- a/storage/remote/queue_manager.go +++ b/storage/remote/queue_manager.go @@ -728,7 +728,7 @@ func (t *QueueManager) releaseLabels(ls labels.Labels) { // processExternalLabels merges externalLabels into ls. If ls contains // a label in externalLabels, the value in ls wins. 
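// For instance (hypothetical label sets, shown only to illustrate the rule
// stated above):
//
//	ls             = {cluster="a", region="us"}
//	externalLabels = {region="eu", replica="0"}
//	result         = {cluster="a", region="us", replica="0"}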
-func processExternalLabels(ls labels.Labels, externalLabels labels.Labels) labels.Labels { +func processExternalLabels(ls, externalLabels labels.Labels) labels.Labels { i, j, result := 0, 0, make(labels.Labels, 0, len(ls)+len(externalLabels)) for i < len(ls) && j < len(externalLabels) { if ls[i].Name < externalLabels[j].Name { @@ -1048,7 +1048,7 @@ func (s *shards) runShard(ctx context.Context, shardID int, queue chan interface max += int(float64(max) * 0.1) } - var pendingData = make([]prompb.TimeSeries, max) + pendingData := make([]prompb.TimeSeries, max) for i := range pendingData { pendingData[i].Samples = []prompb.Sample{{}} if s.qm.sendExemplars { @@ -1142,7 +1142,7 @@ func (s *shards) runShard(ctx context.Context, shardID int, queue chan interface } } -func (s *shards) sendSamples(ctx context.Context, samples []prompb.TimeSeries, sampleCount int, exemplarCount int, pBuf *proto.Buffer, buf *[]byte) { +func (s *shards) sendSamples(ctx context.Context, samples []prompb.TimeSeries, sampleCount, exemplarCount int, pBuf *proto.Buffer, buf *[]byte) { begin := time.Now() err := s.sendSamplesWithBackoff(ctx, samples, sampleCount, exemplarCount, pBuf, buf) if err != nil { @@ -1159,7 +1159,7 @@ func (s *shards) sendSamples(ctx context.Context, samples []prompb.TimeSeries, s } // sendSamples to the remote storage with backoff for recoverable errors. -func (s *shards) sendSamplesWithBackoff(ctx context.Context, samples []prompb.TimeSeries, sampleCount int, exemplarCount int, pBuf *proto.Buffer, buf *[]byte) error { +func (s *shards) sendSamplesWithBackoff(ctx context.Context, samples []prompb.TimeSeries, sampleCount, exemplarCount int, pBuf *proto.Buffer, buf *[]byte) error { // Build the WriteRequest with no metadata. req, highest, err := buildWriteRequest(samples, nil, pBuf, *buf) if err != nil { diff --git a/storage/remote/queue_manager_test.go b/storage/remote/queue_manager_test.go index f56b6d90c6..9a07f7c997 100644 --- a/storage/remote/queue_manager_test.go +++ b/storage/remote/queue_manager_test.go @@ -60,7 +60,6 @@ func newHighestTimestampMetric() *maxTimestamp { } func TestSampleDelivery(t *testing.T) { - testcases := []struct { name string samples bool @@ -107,7 +106,6 @@ func TestSampleDelivery(t *testing.T) { for _, tc := range testcases { t.Run(tc.name, func(t *testing.T) { - var ( series []record.RefSeries samples []record.RefSample @@ -715,7 +713,7 @@ func BenchmarkSampleDelivery(b *testing.B) { const numSeries = 10000 // Extra labels to make a more realistic workload - taken from Kubernetes' embedded cAdvisor metrics. 
- var extraLabels = labels.Labels{ + extraLabels := labels.Labels{ {Name: "kubernetes_io_arch", Value: "amd64"}, {Name: "kubernetes_io_instance_type", Value: "c3.somesize"}, {Name: "kubernetes_io_os", Value: "linux"}, diff --git a/storage/remote/read_test.go b/storage/remote/read_test.go index 646d00c466..67c0f6f744 100644 --- a/storage/remote/read_test.go +++ b/storage/remote/read_test.go @@ -506,7 +506,6 @@ func TestSampleAndChunkQueryableClient(t *testing.T) { } require.NoError(t, ss.Err()) require.Equal(t, tc.expectedSeries, got) - }) } } diff --git a/storage/remote/write_handler.go b/storage/remote/write_handler.go index 92637cf471..ba8b3446bc 100644 --- a/storage/remote/write_handler.go +++ b/storage/remote/write_handler.go @@ -81,9 +81,7 @@ func (h *writeHandler) checkAppendExemplarError(err error, e exemplar.Exemplar, } func (h *writeHandler) write(ctx context.Context, req *prompb.WriteRequest) (err error) { - var ( - outOfOrderExemplarErrs = 0 - ) + outOfOrderExemplarErrs := 0 app := h.appendable.Appender(ctx) defer func() { diff --git a/template/template.go b/template/template.go index dca5aa432a..a8abda8f07 100644 --- a/template/template.go +++ b/template/template.go @@ -87,7 +87,7 @@ func query(ctx context.Context, q string, ts time.Time, queryFn QueryFunc) (quer // promql.Vector is hard to work with in templates, so convert to // base data types. // TODO(fabxc): probably not true anymore after type rework. - var result = make(queryResult, len(vector)) + result := make(queryResult, len(vector)) for n, v := range vector { s := sample{ Value: v.V, @@ -301,7 +301,7 @@ func NewTemplateExpander( } // AlertTemplateData returns the interface to be used in expanding the template. -func AlertTemplateData(labels map[string]string, externalLabels map[string]string, externalURL string, value float64) interface{} { +func AlertTemplateData(labels, externalLabels map[string]string, externalURL string, value float64) interface{} { return struct { Labels map[string]string ExternalLabels map[string]string diff --git a/template/template_test.go b/template/template_test.go index 6c67789ad5..8cb657867a 100644 --- a/template/template_test.go +++ b/template/template_test.go @@ -87,7 +87,8 @@ func TestTemplateExpansion(t *testing.T) { { Metric: labels.FromStrings(labels.MetricName, "metric", "instance", "a"), Point: promql.Point{T: 0, V: 11}, - }}, + }, + }, output: "11", }, { @@ -98,7 +99,8 @@ func TestTemplateExpansion(t *testing.T) { { Metric: labels.FromStrings(labels.MetricName, "metric", "instance", "a"), Point: promql.Point{T: 0, V: 11}, - }}, + }, + }, output: "a", }, { @@ -108,7 +110,8 @@ func TestTemplateExpansion(t *testing.T) { { Metric: labels.FromStrings(labels.MetricName, "metric", "__value__", "a"), Point: promql.Point{T: 0, V: 11}, - }}, + }, + }, output: "a", }, { @@ -118,7 +121,8 @@ func TestTemplateExpansion(t *testing.T) { { Metric: labels.FromStrings(labels.MetricName, "metric", "instance", "a"), Point: promql.Point{T: 0, V: 11}, - }}, + }, + }, output: "", }, { @@ -128,7 +132,8 @@ func TestTemplateExpansion(t *testing.T) { { Metric: labels.FromStrings(labels.MetricName, "metric", "instance", "a"), Point: promql.Point{T: 0, V: 11}, - }}, + }, + }, output: "", }, { @@ -137,7 +142,8 @@ func TestTemplateExpansion(t *testing.T) { { Metric: labels.FromStrings(labels.MetricName, "metric", "instance", "a"), Point: promql.Point{T: 0, V: 11}, - }}, + }, + }, output: "", html: true, }, @@ -151,7 +157,8 @@ func TestTemplateExpansion(t *testing.T) { }, { Metric: 
labels.FromStrings(labels.MetricName, "metric", "instance", "a"), Point: promql.Point{T: 0, V: 11}, - }}, + }, + }, output: "a:11: b:21: ", }, { diff --git a/tsdb/agent/db.go b/tsdb/agent/db.go index 40e192f5c9..30d7793559 100644 --- a/tsdb/agent/db.go +++ b/tsdb/agent/db.go @@ -36,9 +36,7 @@ import ( "go.uber.org/atomic" ) -var ( - ErrUnsupported = errors.New("unsupported operation with WAL-only storage") -) +var ErrUnsupported = errors.New("unsupported operation with WAL-only storage") // Default values for options. var ( diff --git a/tsdb/agent/series.go b/tsdb/agent/series.go index 71b3ca2e26..557f709bbf 100644 --- a/tsdb/agent/series.go +++ b/tsdb/agent/series.go @@ -54,7 +54,7 @@ func (m seriesHashmap) Set(hash uint64, s *memSeries) { m[hash] = append(seriesSet, s) } -func (m seriesHashmap) Delete(hash uint64, ref uint64) { +func (m seriesHashmap) Delete(hash, ref uint64) { var rem []*memSeries for _, s := range m[hash] { if s.ref != ref { diff --git a/tsdb/block.go b/tsdb/block.go index 42a91ff593..88c0a30f5f 100644 --- a/tsdb/block.go +++ b/tsdb/block.go @@ -191,9 +191,11 @@ type BlockMetaCompaction struct { Failed bool `json:"failed,omitempty"` } -const indexFilename = "index" -const metaFilename = "meta.json" -const metaVersion1 = 1 +const ( + indexFilename = "index" + metaFilename = "meta.json" + metaVersion1 = 1 +) func chunkDir(dir string) string { return filepath.Join(dir, "chunks") } @@ -611,12 +613,12 @@ func (pb *Block) CleanTombstones(dest string, c Compactor) (*ulid.ULID, bool, er // Snapshot creates snapshot of the block into dir. func (pb *Block) Snapshot(dir string) error { blockDir := filepath.Join(dir, pb.meta.ULID.String()) - if err := os.MkdirAll(blockDir, 0777); err != nil { + if err := os.MkdirAll(blockDir, 0o777); err != nil { return errors.Wrap(err, "create snapshot block dir") } chunksDir := chunkDir(blockDir) - if err := os.MkdirAll(chunksDir, 0777); err != nil { + if err := os.MkdirAll(chunksDir, 0o777); err != nil { return errors.Wrap(err, "create snapshot chunk dir") } diff --git a/tsdb/block_test.go b/tsdb/block_test.go index 54cfbc2c48..88ca3f1ebb 100644 --- a/tsdb/block_test.go +++ b/tsdb/block_test.go @@ -185,7 +185,7 @@ func TestCorruptedChunk(t *testing.T) { require.NoError(t, err) require.Greater(t, len(files), 0, "No chunk created.") - f, err := os.OpenFile(files[0], os.O_RDWR, 0666) + f, err := os.OpenFile(files[0], os.O_RDWR, 0o666) require.NoError(t, err) // Apply corruption function. @@ -505,7 +505,7 @@ func createBlockFromHead(tb testing.TB, dir string, head *Head) string { compactor, err := NewLeveledCompactor(context.Background(), nil, log.NewNopLogger(), []int64{1000000}, nil, nil) require.NoError(tb, err) - require.NoError(tb, os.MkdirAll(dir, 0777)) + require.NoError(tb, os.MkdirAll(dir, 0o777)) // Add +1 millisecond to block maxt because block intervals are half-open: [b.MinTime, b.MaxTime). // Because of this block intervals are always +1 than the total samples it includes. 
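The half-open convention referenced in the comment above is easy to get wrong, so a minimal sketch may help; it assumes a block interval given as plain int64 millisecond timestamps (the helper name is illustrative, not part of this patch):

	// contains reports whether sample timestamp t falls inside a block
	// whose interval is the half-open range [minTime, maxTime).
	func contains(minTime, maxTime, t int64) bool {
		return minTime <= t && t < maxTime
	}

Under this rule a block holding samples up to and including t=6000 must record MaxTime=6001, which is why createBlockFromHead adds one millisecond above.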
diff --git a/tsdb/chunks/chunks.go b/tsdb/chunks/chunks.go index 11417c38cf..067c9ebc1e 100644 --- a/tsdb/chunks/chunks.go +++ b/tsdb/chunks/chunks.go @@ -97,9 +97,7 @@ func (cm *Meta) OverlapsClosedInterval(mint, maxt int64) bool { return cm.MinTime <= maxt && mint <= cm.MaxTime } -var ( - errInvalidSize = fmt.Errorf("invalid size") -) +var errInvalidSize = fmt.Errorf("invalid size") var castagnoliTable *crc32.Table @@ -148,7 +146,7 @@ func newWriter(dir string, segmentSize int64) (*Writer, error) { segmentSize = DefaultChunkSegmentSize } - if err := os.MkdirAll(dir, 0777); err != nil { + if err := os.MkdirAll(dir, 0o777); err != nil { return nil, err } dirFile, err := fileutil.OpenDir(dir) @@ -224,7 +222,7 @@ func cutSegmentFile(dirFile *os.File, magicNumber uint32, chunksFormat byte, all return 0, nil, 0, errors.Wrap(err, "next sequence file") } ptmp := p + ".tmp" - f, err := os.OpenFile(ptmp, os.O_WRONLY|os.O_CREATE, 0666) + f, err := os.OpenFile(ptmp, os.O_WRONLY|os.O_CREATE, 0o666) if err != nil { return 0, nil, 0, errors.Wrap(err, "open temp file") } @@ -266,7 +264,7 @@ func cutSegmentFile(dirFile *os.File, magicNumber uint32, chunksFormat byte, all return 0, nil, 0, errors.Wrap(err, "replace file") } - f, err = os.OpenFile(p, os.O_WRONLY, 0666) + f, err = os.OpenFile(p, os.O_WRONLY, 0o666) if err != nil { return 0, nil, 0, errors.Wrap(err, "open final file") } @@ -355,7 +353,7 @@ func (w *Writer) writeChunks(chks []Meta) error { return nil } - var seq = uint64(w.seq()) << 32 + seq := uint64(w.seq()) << 32 for i := range chks { chk := &chks[i] diff --git a/tsdb/chunks/head_chunks.go b/tsdb/chunks/head_chunks.go index f4ac61fcc8..cc6c27be34 100644 --- a/tsdb/chunks/head_chunks.go +++ b/tsdb/chunks/head_chunks.go @@ -42,11 +42,9 @@ const ( headChunksFormatV1 = 1 ) -var ( - // ErrChunkDiskMapperClosed returned by any method indicates - // that the ChunkDiskMapper was closed. - ErrChunkDiskMapperClosed = errors.New("ChunkDiskMapper closed") -) +// ErrChunkDiskMapperClosed returned by any method indicates +// that the ChunkDiskMapper was closed. +var ErrChunkDiskMapperClosed = errors.New("ChunkDiskMapper closed") const ( // MintMaxtSize is the size of the mint/maxt for head chunk file and chunks. @@ -83,7 +81,6 @@ func (ref ChunkDiskMapperRef) Unpack() (sgmIndex, chkStart int) { sgmIndex = int(ref >> 32) chkStart = int((ref << 32) >> 32) return sgmIndex, chkStart - } // CorruptionErr is an error that's returned when corruption is encountered. @@ -152,7 +149,7 @@ func NewChunkDiskMapper(dir string, pool chunkenc.Pool, writeBufferSize int) (*C return nil, errors.Errorf("ChunkDiskMapper write buffer size should be a multiple of 1024 (actual: %d)", writeBufferSize) } - if err := os.MkdirAll(dir, 0777); err != nil { + if err := os.MkdirAll(dir, 0o777); err != nil { return nil, err } dirFile, err := fileutil.OpenDir(dir) diff --git a/tsdb/chunks/head_chunks_other.go b/tsdb/chunks/head_chunks_other.go index 8b37dd8c25..cd0e258a1a 100644 --- a/tsdb/chunks/head_chunks_other.go +++ b/tsdb/chunks/head_chunks_other.go @@ -16,8 +16,6 @@ package chunks -var ( - // HeadChunkFilePreallocationSize is the size to which the m-map file should be preallocated when a new file is cut. - // Windows needs pre-allocations while the other OS does not. - HeadChunkFilePreallocationSize int64 -) +// HeadChunkFilePreallocationSize is the size to which the m-map file should be preallocated when a new file is cut. +// Windows needs pre-allocations while the other OS does not. 
+var HeadChunkFilePreallocationSize int64 diff --git a/tsdb/chunks/head_chunks_test.go b/tsdb/chunks/head_chunks_test.go index f1aa13cecb..873a00f67b 100644 --- a/tsdb/chunks/head_chunks_test.go +++ b/tsdb/chunks/head_chunks_test.go @@ -379,7 +379,7 @@ func TestHeadReadWriter_ReadRepairOnEmptyLastFile(t *testing.T) { // Write an empty last file mimicking an abrupt shutdown on file creation. emptyFileName := segmentFile(dir, lastFile+1) - f, err := os.OpenFile(emptyFileName, os.O_WRONLY|os.O_CREATE, 0666) + f, err := os.OpenFile(emptyFileName, os.O_WRONLY|os.O_CREATE, 0o666) require.NoError(t, err) require.NoError(t, f.Sync()) stat, err := f.Stat() @@ -409,7 +409,6 @@ func TestHeadReadWriter_ReadRepairOnEmptyLastFile(t *testing.T) { require.NoError(t, err) require.LessOrEqual(t, seq, uint64(lastFile), "file index on disk is bigger than previous last file") } - } func testChunkDiskMapper(t *testing.T) *ChunkDiskMapper { diff --git a/tsdb/chunks/head_chunks_windows.go b/tsdb/chunks/head_chunks_windows.go index b772b64b46..214ee42f59 100644 --- a/tsdb/chunks/head_chunks_windows.go +++ b/tsdb/chunks/head_chunks_windows.go @@ -13,8 +13,6 @@ package chunks -var ( - // HeadChunkFilePreallocationSize is the size to which the m-map file should be preallocated when a new file is cut. - // Windows needs pre-allocation to m-map the file. - HeadChunkFilePreallocationSize int64 = MaxHeadChunkFileSize -) +// HeadChunkFilePreallocationSize is the size to which the m-map file should be preallocated when a new file is cut. +// Windows needs pre-allocation to m-map the file. +var HeadChunkFilePreallocationSize int64 = MaxHeadChunkFileSize diff --git a/tsdb/compact.go b/tsdb/compact.go index b2ae7e4ea5..ce197f09a6 100644 --- a/tsdb/compact.go +++ b/tsdb/compact.go @@ -564,7 +564,7 @@ func (c *LeveledCompactor) write(dest string, meta *BlockMeta, blocks ...BlockRe return err } - if err = os.MkdirAll(tmp, 0777); err != nil { + if err = os.MkdirAll(tmp, 0o777); err != nil { return err } diff --git a/tsdb/db.go b/tsdb/db.go index c69f6f464f..7326b904bc 100644 --- a/tsdb/db.go +++ b/tsdb/db.go @@ -65,10 +65,8 @@ const ( lockfileCreatedCleanly = 1 ) -var ( - // ErrNotReady is returned if the underlying storage is not ready yet. - ErrNotReady = errors.New("TSDB not ready") -) +// ErrNotReady is returned if the underlying storage is not ready yet. +var ErrNotReady = errors.New("TSDB not ready") // DefaultOptions used for the DB. They are sane for setups using // millisecond precision timestamps. 
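The ErrNotReady rewrite above is one instance of a pattern this patch applies throughout: a parenthesized var block wrapping a single declaration is collapsed to a plain declaration, with its doc comment kept attached. A minimal before/after sketch with an illustrative name (the two forms are alternatives, not meant to coexist in one file):

	// Before: one declaration needlessly wrapped in a block.
	var (
		// errTooSmall is returned when the input is too small.
		errTooSmall = errors.New("input too small")
	)

	// After: the same declaration and doc comment, unwrapped.
	var errTooSmall = errors.New("input too small")

Both spellings declare the same variable; the unwrapped form is simply the idiomatic one for a lone declaration.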
@@ -609,7 +607,7 @@ func validateOpts(opts *Options, rngs []int64) (*Options, []int64) { } func open(dir string, l log.Logger, r prometheus.Registerer, opts *Options, rngs []int64, stats *DBStats) (_ *DB, returnedErr error) { - if err := os.MkdirAll(dir, 0777); err != nil { + if err := os.MkdirAll(dir, 0o777); err != nil { return nil, err } if l == nil { @@ -1642,7 +1640,7 @@ func (db *DB) ExemplarQuerier(ctx context.Context) (storage.ExemplarQuerier, err return db.head.exemplars.ExemplarQuerier(ctx) } -func rangeForTimestamp(t int64, width int64) (maxt int64) { +func rangeForTimestamp(t, width int64) (maxt int64) { return (t/width)*width + width } diff --git a/tsdb/db_test.go b/tsdb/db_test.go index 88342c8f8d..3f657152f0 100644 --- a/tsdb/db_test.go +++ b/tsdb/db_test.go @@ -228,7 +228,7 @@ func TestNoPanicAfterWALCorruption(t *testing.T) { { walFiles, err := ioutil.ReadDir(path.Join(db.Dir(), "wal")) require.NoError(t, err) - f, err := os.OpenFile(path.Join(db.Dir(), "wal", walFiles[0].Name()), os.O_RDWR, 0666) + f, err := os.OpenFile(path.Join(db.Dir(), "wal", walFiles[0].Name()), os.O_RDWR, 0o666) require.NoError(t, err) r := wal.NewReader(bufio.NewReader(f)) require.True(t, r.Next(), "reading the series record") @@ -1245,7 +1245,6 @@ func TestTombstoneCleanRetentionLimitsRace(t *testing.T) { require.NoError(t, db.Close()) } - } func intersection(oldBlocks, actualBlocks []string) (intersection []string) { @@ -1272,6 +1271,7 @@ type mockCompactorFailing struct { func (*mockCompactorFailing) Plan(dir string) ([]string, error) { return nil, nil } + func (c *mockCompactorFailing) Write(dest string, b BlockReader, mint, maxt int64, parent *BlockMeta) (ulid.ULID, error) { if len(c.blocks) >= c.max { return ulid.ULID{}, fmt.Errorf("the compactor already did the maximum allowed blocks so it is time to fail") @@ -1559,7 +1559,7 @@ func expandSeriesSet(ss storage.SeriesSet) ([]labels.Labels, map[string][]sample func TestOverlappingBlocksDetectsAllOverlaps(t *testing.T) { // Create 10 blocks that does not overlap (0-10, 10-20, ..., 100-110) but in reverse order to ensure our algorithm // will handle that. - var metas = make([]BlockMeta, 11) + metas := make([]BlockMeta, 11) for i := 10; i >= 0; i-- { metas[i] = BlockMeta{MinTime: int64(i * 10), MaxTime: int64((i + 1) * 10)} } @@ -1781,7 +1781,7 @@ func TestInitializeHeadTimestamp(t *testing.T) { require.NoError(t, os.RemoveAll(dir)) }() - require.NoError(t, os.MkdirAll(path.Join(dir, "wal"), 0777)) + require.NoError(t, os.MkdirAll(path.Join(dir, "wal"), 0o777)) w, err := wal.New(nil, nil, path.Join(dir, "wal"), false) require.NoError(t, err) @@ -1831,7 +1831,7 @@ func TestInitializeHeadTimestamp(t *testing.T) { createBlock(t, dir, genSeries(1, 1, 1000, 6000)) - require.NoError(t, os.MkdirAll(path.Join(dir, "wal"), 0777)) + require.NoError(t, os.MkdirAll(path.Join(dir, "wal"), 0o777)) w, err := wal.New(nil, nil, path.Join(dir, "wal"), false) require.NoError(t, err) @@ -2663,7 +2663,6 @@ func TestChunkWriter_ReadAfterWrite(t *testing.T) { for i, test := range tests { t.Run(strconv.Itoa(i), func(t *testing.T) { - tempDir, err := ioutil.TempDir("", "test_chunk_writer") require.NoError(t, err) defer func() { require.NoError(t, os.RemoveAll(tempDir)) }() @@ -2899,7 +2898,7 @@ func TestOpen_VariousBlockStates(t *testing.T) { expectedLoadedDirs[outDir] = struct{}{} // Touch chunks dir in block. 
- require.NoError(t, os.MkdirAll(filepath.Join(dbDir, "chunks"), 0777)) + require.NoError(t, os.MkdirAll(filepath.Join(dbDir, "chunks"), 0o777)) defer func() { require.NoError(t, os.RemoveAll(filepath.Join(dbDir, "chunks"))) }() @@ -3166,7 +3165,7 @@ func TestLockfileMetric(t *testing.T) { // Test preconditions (file already exists + lockfile option) lockfilePath := filepath.Join(absdir, "lock") if c.fileAlreadyExists { - err = ioutil.WriteFile(lockfilePath, []byte{}, 0644) + err = ioutil.WriteFile(lockfilePath, []byte{}, 0o644) require.NoError(t, err) } opts := DefaultOptions() diff --git a/tsdb/encoding/encoding.go b/tsdb/encoding/encoding.go index 8a94ff7bac..c2da86afa1 100644 --- a/tsdb/encoding/encoding.go +++ b/tsdb/encoding/encoding.go @@ -133,7 +133,6 @@ func NewDecbufAt(bs ByteSlice, off int, castagnoliTable *crc32.Table) Decbuf { dec := Decbuf{B: b[:len(b)-4]} if castagnoliTable != nil { - if exp := binary.BigEndian.Uint32(b[len(b)-4:]); dec.Crc32(castagnoliTable) != exp { return Decbuf{E: ErrInvalidChecksum} } diff --git a/tsdb/exemplar.go b/tsdb/exemplar.go index 086e696675..63ac512c91 100644 --- a/tsdb/exemplar.go +++ b/tsdb/exemplar.go @@ -284,7 +284,7 @@ func (ce *CircularExemplarStorage) Resize(l int64) int { // This math is essentially looking at nextIndex, where we would write the next exemplar to, // and find the index in the old exemplar buffer that we should start migrating exemplars from. // This way we don't migrate exemplars that would just be overwritten when migrating later exemplars. - var startIndex = (oldNextIndex - count + int64(len(oldBuffer))) % int64(len(oldBuffer)) + startIndex := (oldNextIndex - count + int64(len(oldBuffer))) % int64(len(oldBuffer)) for i := int64(0); i < count; i++ { idx := (startIndex + i) % int64(len(oldBuffer)) diff --git a/tsdb/exemplar_test.go b/tsdb/exemplar_test.go index eb95daa34b..db47780fdd 100644 --- a/tsdb/exemplar_test.go +++ b/tsdb/exemplar_test.go @@ -448,7 +448,6 @@ func TestResize(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - exs, err := NewCircularExemplarStorage(tc.startSize, eMetrics) require.NoError(t, err) es := exs.(*CircularExemplarStorage) @@ -456,7 +455,8 @@ func TestResize(t *testing.T) { for i := 0; int64(i) < tc.startSize; i++ { err = es.AddExemplar(labels.FromStrings("service", strconv.Itoa(i)), exemplar.Exemplar{ Value: float64(i), - Ts: int64(i)}) + Ts: int64(i), + }) require.NoError(t, err) } diff --git a/tsdb/fileutil/fileutil.go b/tsdb/fileutil/fileutil.go index 927ebe004d..8ab8ce3dd1 100644 --- a/tsdb/fileutil/fileutil.go +++ b/tsdb/fileutil/fileutil.go @@ -27,7 +27,7 @@ import ( // CopyDirs copies all directories, subdirectories and files recursively including the empty folders. // Source and destination must be full paths. func CopyDirs(src, dest string) error { - if err := os.MkdirAll(dest, 0777); err != nil { + if err := os.MkdirAll(dest, 0o777); err != nil { return err } files, err := readDirs(src) @@ -46,7 +46,7 @@ func CopyDirs(src, dest string) error { // Empty directories are also created. 
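// Note: 0o777 below is the Go 1.13+ octal literal spelling; 0o777 and the
// older 0777 denote the same value (decimal 511), so file permissions are
// unchanged by this rewrite.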
if stat.IsDir() { - if err := os.MkdirAll(dp, 0777); err != nil { + if err := os.MkdirAll(dp, 0o777); err != nil { return err } continue @@ -65,7 +65,7 @@ func copyFile(src, dest string) error { return err } - err = ioutil.WriteFile(dest, data, 0666) + err = ioutil.WriteFile(dest, data, 0o666) if err != nil { return err } diff --git a/tsdb/fileutil/flock.go b/tsdb/fileutil/flock.go index d5eaa7ca2a..e0082e2f2c 100644 --- a/tsdb/fileutil/flock.go +++ b/tsdb/fileutil/flock.go @@ -29,7 +29,7 @@ type Releaser interface { // locking has failed. Neither this function nor the returned Releaser is // goroutine-safe. func Flock(fileName string) (r Releaser, existed bool, err error) { - if err = os.MkdirAll(filepath.Dir(fileName), 0755); err != nil { + if err = os.MkdirAll(filepath.Dir(fileName), 0o755); err != nil { return nil, false, err } diff --git a/tsdb/fileutil/flock_plan9.go b/tsdb/fileutil/flock_plan9.go index 71ed67e8c3..3b9550e7f2 100644 --- a/tsdb/fileutil/flock_plan9.go +++ b/tsdb/fileutil/flock_plan9.go @@ -24,7 +24,7 @@ func (l *plan9Lock) Release() error { } func newLock(fileName string) (Releaser, error) { - f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE, os.ModeExclusive|0666) + f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE, os.ModeExclusive|0o666) if err != nil { return nil, err } diff --git a/tsdb/fileutil/flock_solaris.go b/tsdb/fileutil/flock_solaris.go index f19c184a44..21be384d33 100644 --- a/tsdb/fileutil/flock_solaris.go +++ b/tsdb/fileutil/flock_solaris.go @@ -46,7 +46,7 @@ func (l *unixLock) set(lock bool) error { } func newLock(fileName string) (Releaser, error) { - f, err := os.OpenFile(fileName, os.O_RDWR|os.O_CREATE, 0666) + f, err := os.OpenFile(fileName, os.O_RDWR|os.O_CREATE, 0o666) if err != nil { return nil, err } diff --git a/tsdb/fileutil/flock_unix.go b/tsdb/fileutil/flock_unix.go index c0aeb69486..9637f073bf 100644 --- a/tsdb/fileutil/flock_unix.go +++ b/tsdb/fileutil/flock_unix.go @@ -41,7 +41,7 @@ func (l *unixLock) set(lock bool) error { } func newLock(fileName string) (Releaser, error) { - f, err := os.OpenFile(fileName, os.O_RDWR|os.O_CREATE, 0666) + f, err := os.OpenFile(fileName, os.O_RDWR|os.O_CREATE, 0o666) if err != nil { return nil, err } diff --git a/tsdb/head.go b/tsdb/head.go index 80edf794e0..8bfbb1174b 100644 --- a/tsdb/head.go +++ b/tsdb/head.go @@ -850,7 +850,7 @@ func (h *Head) WaitForPendingReadersInTimeRange(mint, maxt int64) { // new range head and the new querier. This methods helps preventing races with the truncation of in-memory data. // // NOTE: The querier should already be taken before calling this. -func (h *Head) IsQuerierCollidingWithTruncation(querierMint, querierMaxt int64) (shouldClose bool, getNew bool, newMint int64) { +func (h *Head) IsQuerierCollidingWithTruncation(querierMint, querierMaxt int64) (shouldClose, getNew bool, newMint int64) { if !h.memTruncationInProcess.Load() { return false, false, 0 } @@ -1197,7 +1197,6 @@ func (h *Head) Close() error { errs.Add(h.performChunkSnapshot()) } return errs.Err() - } // String returns an human readable representation of the TSDB head. 
It's important to diff --git a/tsdb/head_test.go b/tsdb/head_test.go index 1018476402..8ace653556 100644 --- a/tsdb/head_test.go +++ b/tsdb/head_test.go @@ -842,7 +842,6 @@ func TestDeletedSamplesAndSeriesStillInWALAfterCheckpoint(t *testing.T) { require.Equal(t, 1, series) require.Equal(t, 9999, samples) require.Equal(t, 1, stones) - } func TestDelete_e2e(t *testing.T) { @@ -1472,7 +1471,7 @@ func TestHeadReadWriterRepair(t *testing.T) { require.Equal(t, 7, len(files)) // Corrupt the 4th file by writing a random byte to series ref. - f, err := os.OpenFile(filepath.Join(mmappedChunksDir(dir), files[3].Name()), os.O_WRONLY, 0666) + f, err := os.OpenFile(filepath.Join(mmappedChunksDir(dir), files[3].Name()), os.O_WRONLY, 0o666) require.NoError(t, err) n, err := f.WriteAt([]byte{67, 88}, chunks.HeadChunkFileHeaderSize+2) require.NoError(t, err) @@ -1946,7 +1945,8 @@ func TestHeadLabelNamesValuesWithMinMaxRange(t *testing.T) { lastSeriesTimestamp int64 = 300 ) var ( - seriesTimestamps = []int64{firstSeriesTimestamp, + seriesTimestamps = []int64{ + firstSeriesTimestamp, secondSeriesTimestamp, lastSeriesTimestamp, } @@ -1963,7 +1963,7 @@ func TestHeadLabelNamesValuesWithMinMaxRange(t *testing.T) { require.Equal(t, head.MinTime(), firstSeriesTimestamp) require.Equal(t, head.MaxTime(), lastSeriesTimestamp) - var testCases = []struct { + testCases := []struct { name string mint int64 maxt int64 @@ -2007,7 +2007,7 @@ func TestHeadLabelValuesWithMatchers(t *testing.T) { } require.NoError(t, app.Commit()) - var testCases = []struct { + testCases := []struct { name string labelName string matchers []*labels.Matcher @@ -2765,7 +2765,6 @@ func TestChunkSnapshot(t *testing.T) { require.Equal(t, 0.0, prom_testutil.ToFloat64(head.metrics.snapshotReplayErrorTotal)) } - } func TestSnapshotError(t *testing.T) { diff --git a/tsdb/head_wal.go b/tsdb/head_wal.go index 4788298240..3f6416e171 100644 --- a/tsdb/head_wal.go +++ b/tsdb/head_wal.go @@ -15,11 +15,6 @@ package tsdb import ( "fmt" - "github.com/prometheus/prometheus/pkg/labels" - "github.com/prometheus/prometheus/tsdb/chunkenc" - "github.com/prometheus/prometheus/tsdb/encoding" - tsdb_errors "github.com/prometheus/prometheus/tsdb/errors" - "github.com/prometheus/prometheus/tsdb/fileutil" "io/ioutil" "math" "os" @@ -30,6 +25,12 @@ import ( "sync" "time" + "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/tsdb/chunkenc" + "github.com/prometheus/prometheus/tsdb/encoding" + tsdb_errors "github.com/prometheus/prometheus/tsdb/errors" + "github.com/prometheus/prometheus/tsdb/fileutil" + "github.com/go-kit/log/level" "github.com/pkg/errors" "go.uber.org/atomic" @@ -585,7 +586,7 @@ func (h *Head) ChunkSnapshot() (*ChunkSnapshotStats, error) { cpdirtmp := cpdir + ".tmp" stats.Dir = cpdir - if err := os.MkdirAll(cpdirtmp, 0777); err != nil { + if err := os.MkdirAll(cpdirtmp, 0o777); err != nil { return stats, errors.Wrap(err, "create chunk snapshot dir") } cp, err := wal.New(nil, nil, cpdirtmp, h.wal.CompressionEnabled()) diff --git a/tsdb/index/index.go b/tsdb/index/index.go index d6f5d6b880..bfe630a7a0 100644 --- a/tsdb/index/index.go +++ b/tsdb/index/index.go @@ -262,7 +262,7 @@ type FileWriter struct { } func NewFileWriter(name string) (*FileWriter, error) { - f, err := os.OpenFile(name, os.O_CREATE|os.O_RDWR, 0666) + f, err := os.OpenFile(name, os.O_CREATE|os.O_RDWR, 0o666) if err != nil { return nil, err } @@ -903,7 +903,6 @@ func (w *Writer) writePostingsToTmpFiles() error { values := make([]uint32, 0, len(postings[sid])) for v 
:= range postings[sid] { values = append(values, v) - } // Symbol numbers are in order, so the strings will also be in order. sort.Sort(uint32slice(values)) @@ -1265,7 +1264,7 @@ type Symbols struct { const symbolFactor = 32 // NewSymbols returns a Symbols object for symbol lookups. -func NewSymbols(bs ByteSlice, version int, off int) (*Symbols, error) { +func NewSymbols(bs ByteSlice, version, off int) (*Symbols, error) { s := &Symbols{ bs: bs, version: version, @@ -1504,7 +1503,7 @@ func (r *Reader) LabelValues(name string, matchers ...*labels.Matcher) ([]string } else { d.Skip(skip) } - s := yoloString(d.UvarintBytes()) //Label value. + s := yoloString(d.UvarintBytes()) // Label value. values = append(values, s) if s == lastVal { break diff --git a/tsdb/index/index_test.go b/tsdb/index/index_test.go index 0e75d37588..8fdc630690 100644 --- a/tsdb/index/index_test.go +++ b/tsdb/index/index_test.go @@ -154,7 +154,7 @@ func TestIndexRW_Create_Open(t *testing.T) { require.NoError(t, ir.Close()) // Modify magic header must cause open to fail. - f, err := os.OpenFile(fn, os.O_WRONLY, 0666) + f, err := os.OpenFile(fn, os.O_WRONLY, 0o666) require.NoError(t, err) _, err = f.WriteAt([]byte{0, 0}, 0) require.NoError(t, err) @@ -340,7 +340,6 @@ func TestPostingsMany(t *testing.T) { } require.Equal(t, exp, got, fmt.Sprintf("input: %v", c.in)) } - } func TestPersistence_index_e2e(t *testing.T) { @@ -504,7 +503,7 @@ func TestNewFileReaderErrorNoOpenFiles(t *testing.T) { dir := testutil.NewTemporaryDirectory("block", t) idxName := filepath.Join(dir.Path(), "index") - err := ioutil.WriteFile(idxName, []byte("corrupted contents"), 0666) + err := ioutil.WriteFile(idxName, []byte("corrupted contents"), 0o666) require.NoError(t, err) _, err = NewFileReader(idxName) diff --git a/tsdb/index/postings_test.go b/tsdb/index/postings_test.go index bbf5332a5a..4ec85aee4d 100644 --- a/tsdb/index/postings_test.go +++ b/tsdb/index/postings_test.go @@ -67,7 +67,7 @@ func TestIntersect(t *testing.T) { a := newListPostings(1, 2, 3) b := newListPostings(2, 3, 4) - var cases = []struct { + cases := []struct { in []Postings res Postings @@ -182,7 +182,7 @@ func TestIntersect(t *testing.T) { } func TestMultiIntersect(t *testing.T) { - var cases = []struct { + cases := []struct { p [][]uint64 res []uint64 }{ @@ -320,7 +320,7 @@ func TestMultiMerge(t *testing.T) { } func TestMergedPostings(t *testing.T) { - var cases = []struct { + cases := []struct { in []Postings res Postings @@ -424,7 +424,7 @@ func TestMergedPostings(t *testing.T) { } func TestMergedPostingsSeek(t *testing.T) { - var cases = []struct { + cases := []struct { a, b []uint64 seek uint64 @@ -486,7 +486,7 @@ func TestMergedPostingsSeek(t *testing.T) { } func TestRemovedPostings(t *testing.T) { - var cases = []struct { + cases := []struct { a, b []uint64 res []uint64 }{ @@ -535,7 +535,6 @@ func TestRemovedPostings(t *testing.T) { require.NoError(t, err) require.Equal(t, c.res, res) } - } func TestRemovedNextStackoverflow(t *testing.T) { @@ -561,7 +560,7 @@ func TestRemovedNextStackoverflow(t *testing.T) { } func TestRemovedPostingsSeek(t *testing.T) { - var cases = []struct { + cases := []struct { a, b []uint64 seek uint64 @@ -738,7 +737,7 @@ func TestIntersectWithMerge(t *testing.T) { } func TestWithoutPostings(t *testing.T) { - var cases = []struct { + cases := []struct { base Postings drop Postings @@ -826,7 +825,6 @@ func BenchmarkPostings_Stats(b *testing.B) { p.Add(seriesID, labels.FromStrings(name, value)) seriesID++ } - } 
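// Seed one large __name__ posting list plus (below) a spread of smaller
// label values, so Stats has a realistic distribution to rank.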
createPostingsLabelValues("__name__", "metrics_name_can_be_very_big_and_bad", 1e3) for i := 0; i < 20; i++ { @@ -845,7 +843,6 @@ func BenchmarkPostings_Stats(b *testing.B) { for n := 0; n < b.N; n++ { p.Stats("__name__") } - } func TestMemPostings_Delete(t *testing.T) { diff --git a/tsdb/index/postingsstats.go b/tsdb/index/postingsstats.go index 2fc2465d9c..5e5880720a 100644 --- a/tsdb/index/postingsstats.go +++ b/tsdb/index/postingsstats.go @@ -59,7 +59,6 @@ func (m *maxHeap) push(item Stat) { m.minIndex = i } } - } func (m *maxHeap) get() []Stat { diff --git a/tsdb/index/postingsstats_test.go b/tsdb/index/postingsstats_test.go index 910b5a06a8..7ce51c795f 100644 --- a/tsdb/index/postingsstats_test.go +++ b/tsdb/index/postingsstats_test.go @@ -37,7 +37,6 @@ func TestPostingsStats(t *testing.T) { for i := 0; i < heapLength; i++ { require.Equal(t, uint64(max-i), data[i].Count) } - } func TestPostingsStats2(t *testing.T) { @@ -55,6 +54,7 @@ func TestPostingsStats2(t *testing.T) { require.Equal(t, 4, len(data)) require.Equal(t, uint64(11), data[0].Count) } + func BenchmarkPostingStatsMaxHep(b *testing.B) { stats := &maxHeap{} max := 9000000 @@ -71,5 +71,4 @@ func BenchmarkPostingStatsMaxHep(b *testing.B) { } stats.get() } - } diff --git a/tsdb/querier.go b/tsdb/querier.go index 18a1fd20a5..a0bf762c14 100644 --- a/tsdb/querier.go +++ b/tsdb/querier.go @@ -606,6 +606,7 @@ func (p *populateWithDelGenericSeriesIterator) Err() error { return p.err } func (p *populateWithDelGenericSeriesIterator) toSeriesIterator() chunkenc.Iterator { return &populateWithDelSeriesIterator{populateWithDelGenericSeriesIterator: p} } + func (p *populateWithDelGenericSeriesIterator) toChunkSeriesIterator() chunks.Iterator { return &populateWithDelChunkSeriesIterator{populateWithDelGenericSeriesIterator: p} } @@ -779,7 +780,7 @@ func (b *blockChunkSeriesSet) At() storage.ChunkSeries { } // NewMergedStringIter returns string iterator that allows to merge symbols on demand and stream result. -func NewMergedStringIter(a index.StringIter, b index.StringIter) index.StringIter { +func NewMergedStringIter(a, b index.StringIter) index.StringIter { return &mergedStringIter{a: a, b: b, aok: a.Next(), bok: b.Next()} } @@ -875,7 +876,6 @@ Outer: if ts <= tr.Maxt { return true - } it.Intervals = it.Intervals[1:] } diff --git a/tsdb/querier_test.go b/tsdb/querier_test.go index 514b1f5b91..c90aadaf39 100644 --- a/tsdb/querier_test.go +++ b/tsdb/querier_test.go @@ -918,7 +918,7 @@ func TestPopulateWithDelSeriesIterator_NextWithMinTime(t *testing.T) { // The subset are all equivalent so this does not capture merging of partial or non-overlapping sets well. // TODO(bwplotka): Merge with storage merged series set benchmark. func BenchmarkMergedSeriesSet(b *testing.B) { - var sel = func(sets []storage.SeriesSet) storage.SeriesSet { + sel := func(sets []storage.SeriesSet) storage.SeriesSet { return storage.NewMergeSeriesSet(sets, storage.ChainedSeriesMerge) } @@ -1860,7 +1860,6 @@ func TestPostingsForMatchers(t *testing.T) { t.Errorf("Evaluating %v, missing results %+v", c.matchers, exp) } } - } // TestClose ensures that calling Close more than once doesn't block and doesn't panic. 
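// Illustrative aside, not part of the patch: a minimal, self-contained
// sketch (all names invented) of two mechanical rewrites applied throughout
// these hunks -- `var x = ...` inside a function becomes the short form
// `x := ...`, and consecutive parameters of the same type are merged, as in
// NewMergedStringIter's `(a, b index.StringIter)` above.
package sketch

import "sort"

// mergeSorted uses the merged parameter style `a, b []int` rather than
// `a []int, b []int`.
func mergeSorted(a, b []int) []int {
	// Short declaration instead of `var out = make([]int, 0, len(a)+len(b))`.
	out := make([]int, 0, len(a)+len(b))
	out = append(out, a...)
	out = append(out, b...)
	sort.Ints(out)
	return out
}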
@@ -2116,7 +2115,12 @@ func TestBlockBaseSeriesSet(t *testing.T) { { lset: labels.New([]labels.Label{{Name: "a", Value: "a"}}...), chunks: []chunks.Meta{ - {Ref: 29}, {Ref: 45}, {Ref: 245}, {Ref: 123}, {Ref: 4232}, {Ref: 5344}, + {Ref: 29}, + {Ref: 45}, + {Ref: 245}, + {Ref: 123}, + {Ref: 4232}, + {Ref: 5344}, {Ref: 121}, }, ref: 12, diff --git a/tsdb/record/record.go b/tsdb/record/record.go index 3bfb1be935..192c29ce70 100644 --- a/tsdb/record/record.go +++ b/tsdb/record/record.go @@ -41,10 +41,8 @@ const ( Exemplars Type = 4 ) -var ( - // ErrNotFound is returned if a looked up resource was not found. Duplicate ErrNotFound from head.go. - ErrNotFound = errors.New("not found") -) +// ErrNotFound is returned if a looked up resource was not found. Duplicate ErrNotFound from head.go. +var ErrNotFound = errors.New("not found") // RefSeries is the series labels with the series ID. type RefSeries struct { @@ -69,8 +67,7 @@ type RefExemplar struct { // Decoder decodes series, sample, and tombstone records. // The zero value is ready to use. -type Decoder struct { -} +type Decoder struct{} // Type returns the type of the record. // Returns RecordUnknown if no valid record type is found. @@ -225,8 +222,7 @@ func (d *Decoder) ExemplarsFromBuffer(dec *encoding.Decbuf, exemplars []RefExemp // Encoder encodes series, sample, and tombstones records. // The zero value is ready to use. -type Encoder struct { -} +type Encoder struct{} // Series appends the encoded series to b and returns the resulting slice. func (e *Encoder) Series(series []RefSeries, b []byte) []byte { diff --git a/tsdb/repair_test.go b/tsdb/repair_test.go index 7fb2720fd6..dc0fa598ef 100644 --- a/tsdb/repair_test.go +++ b/tsdb/repair_test.go @@ -78,7 +78,7 @@ func TestRepairBadIndexVersion(t *testing.T) { require.Error(t, err) // Touch chunks dir in block to imitate them. - require.NoError(t, os.MkdirAll(filepath.Join(tmpDbDir, "chunks"), 0777)) + require.NoError(t, os.MkdirAll(filepath.Join(tmpDbDir, "chunks"), 0o777)) // Read current index to check integrity. 
r, err := index.NewFileReader(filepath.Join(tmpDbDir, indexFilename)) diff --git a/tsdb/test/hash_test.go b/tsdb/test/hash_test.go index 1778f0f863..1242f5db5d 100644 --- a/tsdb/test/hash_test.go +++ b/tsdb/test/hash_test.go @@ -81,7 +81,6 @@ func fnv64a(b []byte) uint64 { } func BenchmarkCRC32_diff(b *testing.B) { - data := [][]byte{} for i := 0; i < 1000; i++ { diff --git a/tsdb/tombstones/tombstones_test.go b/tsdb/tombstones/tombstones_test.go index fa1825d178..aee8568eb8 100644 --- a/tsdb/tombstones/tombstones_test.go +++ b/tsdb/tombstones/tombstones_test.go @@ -77,7 +77,7 @@ func TestDeletingTombstones(t *testing.T) { require.NoError(t, err) require.Equal(t, intervals, dranges) - stones.DeleteTombstones(map[uint64]struct{}{ref: struct{}{}}) + stones.DeleteTombstones(map[uint64]struct{}{ref: {}}) intervals, err = stones.Get(ref) require.NoError(t, err) diff --git a/tsdb/tsdbutil/buffer.go b/tsdb/tsdbutil/buffer.go index a24d504729..3e136bb1d5 100644 --- a/tsdb/tsdbutil/buffer.go +++ b/tsdb/tsdbutil/buffer.go @@ -210,7 +210,7 @@ func (r *sampleRing) last() (int64, float64, bool) { func (r *sampleRing) samples() []sample { res := make([]sample, r.l) - var k = r.f + r.l + k := r.f + r.l var j int if k > len(r.buf) { k = len(r.buf) diff --git a/tsdb/tsdbutil/chunks.go b/tsdb/tsdbutil/chunks.go index 5ae58b0a8c..ffe9c05e00 100644 --- a/tsdb/tsdbutil/chunks.go +++ b/tsdb/tsdbutil/chunks.go @@ -67,7 +67,7 @@ func PopulatedChunk(numSamples int, minTime int64) chunks.Meta { } // GenerateSamples starting at start and counting up numSamples. -func GenerateSamples(start int, numSamples int) []Sample { +func GenerateSamples(start, numSamples int) []Sample { samples := make([]Sample, 0, numSamples) for i := start; i < start+numSamples; i++ { samples = append(samples, sample{ diff --git a/tsdb/wal.go b/tsdb/wal.go index 71fc0a44c3..281044a953 100644 --- a/tsdb/wal.go +++ b/tsdb/wal.go @@ -171,7 +171,7 @@ type SegmentWAL struct { // OpenSegmentWAL opens or creates a write ahead log in the given directory. // The WAL must be read completely before new data is written. func OpenSegmentWAL(dir string, logger log.Logger, flushInterval time.Duration, r prometheus.Registerer) (*SegmentWAL, error) { - if err := os.MkdirAll(dir, 0777); err != nil { + if err := os.MkdirAll(dir, 0o777); err != nil { return nil, err } df, err := fileutil.OpenDir(dir) @@ -505,7 +505,7 @@ func (w *SegmentWAL) LogDeletes(stones []tombstones.Stone) error { func (w *SegmentWAL) openSegmentFile(name string) (*os.File, error) { // We must open all files in read/write mode as we may have to truncate along // the way and any file may become the head. 
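// Illustrative aside, not part of the patch (the file name below is
// invented): the 0666 -> 0o666 and 0777 -> 0o777 rewrites in the
// surrounding hunks only adopt Go 1.13's explicit `0o` octal prefix; the
// numeric permission values are unchanged (0o666 == 0666 == 438 decimal).
package sketch

import "os"

func writeExample() error {
	// Behaves exactly like the old `0666` spelling.
	return os.WriteFile("example.txt", []byte("data"), 0o666)
}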
- f, err := os.OpenFile(name, os.O_RDWR, 0666) + f, err := os.OpenFile(name, os.O_RDWR, 0o666) if err != nil { return nil, err } diff --git a/tsdb/wal/checkpoint.go b/tsdb/wal/checkpoint.go index eb0d27034a..80c0c10369 100644 --- a/tsdb/wal/checkpoint.go +++ b/tsdb/wal/checkpoint.go @@ -129,7 +129,7 @@ func Checkpoint(logger log.Logger, w *WAL, from, to int, keep func(id uint64) bo return nil, errors.Wrap(err, "remove previous temporary checkpoint dir") } - if err := os.MkdirAll(cpdirtmp, 0777); err != nil { + if err := os.MkdirAll(cpdirtmp, 0o777); err != nil { return nil, errors.Wrap(err, "create checkpoint dir") } cp, err := New(nil, nil, cpdirtmp, w.CompressionEnabled()) diff --git a/tsdb/wal/checkpoint_test.go b/tsdb/wal/checkpoint_test.go index 9df533df1f..2a5ff736a5 100644 --- a/tsdb/wal/checkpoint_test.go +++ b/tsdb/wal/checkpoint_test.go @@ -36,37 +36,37 @@ func TestLastCheckpoint(t *testing.T) { _, _, err := LastCheckpoint(dir) require.Equal(t, record.ErrNotFound, err) - require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.0000"), 0777)) + require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.0000"), 0o777)) s, k, err := LastCheckpoint(dir) require.NoError(t, err) require.Equal(t, filepath.Join(dir, "checkpoint.0000"), s) require.Equal(t, 0, k) - require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.xyz"), 0777)) + require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.xyz"), 0o777)) s, k, err = LastCheckpoint(dir) require.NoError(t, err) require.Equal(t, filepath.Join(dir, "checkpoint.0000"), s) require.Equal(t, 0, k) - require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.1"), 0777)) + require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.1"), 0o777)) s, k, err = LastCheckpoint(dir) require.NoError(t, err) require.Equal(t, filepath.Join(dir, "checkpoint.1"), s) require.Equal(t, 1, k) - require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.1000"), 0777)) + require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.1000"), 0o777)) s, k, err = LastCheckpoint(dir) require.NoError(t, err) require.Equal(t, filepath.Join(dir, "checkpoint.1000"), s) require.Equal(t, 1000, k) - require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.99999999"), 0777)) + require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.99999999"), 0o777)) s, k, err = LastCheckpoint(dir) require.NoError(t, err) require.Equal(t, filepath.Join(dir, "checkpoint.99999999"), s) require.Equal(t, 99999999, k) - require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.100000000"), 0777)) + require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.100000000"), 0o777)) s, k, err = LastCheckpoint(dir) require.NoError(t, err) require.Equal(t, filepath.Join(dir, "checkpoint.100000000"), s) @@ -78,10 +78,10 @@ func TestDeleteCheckpoints(t *testing.T) { require.NoError(t, DeleteCheckpoints(dir, 0)) - require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.00"), 0777)) - require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.01"), 0777)) - require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.02"), 0777)) - require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.03"), 0777)) + require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.00"), 0o777)) + require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.01"), 0o777)) + require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.02"), 0o777)) + require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.03"), 0o777)) require.NoError(t, DeleteCheckpoints(dir, 2)) 
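// Illustrative aside with invented names, not part of the diff: the
// reader_test.go and watcher_test.go hunks below merge adjacent top-level
// `var` statements into a single parenthesized block, as sketched here.
package sketch

import "time"

// Before: `var defaultRetryInterval = ...` and `var defaultRetries = ...`
// as two separate statements; after: one grouped declaration.
var (
	defaultRetryInterval = 100 * time.Millisecond
	defaultRetries       = 100
)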
@@ -93,9 +93,9 @@ func TestDeleteCheckpoints(t *testing.T) { } require.Equal(t, []string{"checkpoint.02", "checkpoint.03"}, fns) - require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.99999999"), 0777)) - require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.100000000"), 0777)) - require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.100000001"), 0777)) + require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.99999999"), 0o777)) + require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.100000000"), 0o777)) + require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.100000001"), 0o777)) require.NoError(t, DeleteCheckpoints(dir, 100000000)) @@ -233,11 +233,12 @@ func TestCheckpointNoTmpFolderAfterError(t *testing.T) { require.NoError(t, err) var enc record.Encoder require.NoError(t, w.Log(enc.Series([]record.RefSeries{ - {Ref: 0, Labels: labels.FromStrings("a", "b", "c", "2")}}, nil))) + {Ref: 0, Labels: labels.FromStrings("a", "b", "c", "2")}, + }, nil))) require.NoError(t, w.Close()) // Corrupt data. - f, err := os.OpenFile(filepath.Join(w.Dir(), "00000000"), os.O_WRONLY, 0666) + f, err := os.OpenFile(filepath.Join(w.Dir(), "00000000"), os.O_WRONLY, 0o666) require.NoError(t, err) _, err = f.WriteAt([]byte{42}, 1) require.NoError(t, err) diff --git a/tsdb/wal/reader_test.go b/tsdb/wal/reader_test.go index 0513b6782a..07dc4dbc5e 100644 --- a/tsdb/wal/reader_test.go +++ b/tsdb/wal/reader_test.go @@ -59,100 +59,102 @@ var readerConstructors = map[string]func(io.Reader) reader{ }, } -var data = make([]byte, 100000) -var testReaderCases = []struct { - t []rec - exp [][]byte - fail bool -}{ - // Sequence of valid records. - { - t: []rec{ - {recFull, data[0:200]}, - {recFirst, data[200:300]}, - {recLast, data[300:400]}, - {recFirst, data[400:800]}, - {recMiddle, data[800:900]}, - {recPageTerm, make([]byte, pageSize-900-recordHeaderSize*5-1)}, // exactly lines up with page boundary. - {recLast, data[900:900]}, - {recFirst, data[900:1000]}, - {recMiddle, data[1000:1200]}, - {recMiddle, data[1200:30000]}, - {recMiddle, data[30000:30001]}, - {recMiddle, data[30001:30001]}, - {recLast, data[30001:32000]}, +var ( + data = make([]byte, 100000) + testReaderCases = []struct { + t []rec + exp [][]byte + fail bool + }{ + // Sequence of valid records. + { + t: []rec{ + {recFull, data[0:200]}, + {recFirst, data[200:300]}, + {recLast, data[300:400]}, + {recFirst, data[400:800]}, + {recMiddle, data[800:900]}, + {recPageTerm, make([]byte, pageSize-900-recordHeaderSize*5-1)}, // exactly lines up with page boundary. + {recLast, data[900:900]}, + {recFirst, data[900:1000]}, + {recMiddle, data[1000:1200]}, + {recMiddle, data[1200:30000]}, + {recMiddle, data[30000:30001]}, + {recMiddle, data[30001:30001]}, + {recLast, data[30001:32000]}, + }, + exp: [][]byte{ + data[0:200], + data[200:400], + data[400:900], + data[900:32000], + }, }, - exp: [][]byte{ - data[0:200], - data[200:400], - data[400:900], - data[900:32000], + // Exactly at the limit of one page minus the header size + { + t: []rec{ + {recFull, data[0 : pageSize-recordHeaderSize]}, + }, + exp: [][]byte{ + data[:pageSize-recordHeaderSize], + }, }, - }, - // Exactly at the limit of one page minus the header size - { - t: []rec{ - {recFull, data[0 : pageSize-recordHeaderSize]}, + // More than a full page, this exceeds our buffer and can never happen + // when written by the WAL. 
+ { + t: []rec{ + {recFull, data[0 : pageSize+1]}, + }, + fail: true, }, - exp: [][]byte{ - data[:pageSize-recordHeaderSize], + // Two records the together are too big for a page. + // NB currently the non-live reader succeeds on this. I think this is a bug. + // but we've seen it in production. + { + t: []rec{ + {recFull, data[:pageSize/2]}, + {recFull, data[:pageSize/2]}, + }, + exp: [][]byte{ + data[:pageSize/2], + data[:pageSize/2], + }, }, - }, - // More than a full page, this exceeds our buffer and can never happen - // when written by the WAL. - { - t: []rec{ - {recFull, data[0 : pageSize+1]}, + // Invalid orders of record types. + { + t: []rec{{recMiddle, data[:200]}}, + fail: true, }, - fail: true, - }, - // Two records the together are too big for a page. - // NB currently the non-live reader succeeds on this. I think this is a bug. - // but we've seen it in production. - { - t: []rec{ - {recFull, data[:pageSize/2]}, - {recFull, data[:pageSize/2]}, + { + t: []rec{{recLast, data[:200]}}, + fail: true, }, - exp: [][]byte{ - data[:pageSize/2], - data[:pageSize/2], + { + t: []rec{ + {recFirst, data[:200]}, + {recFull, data[200:400]}, + }, + fail: true, }, - }, - // Invalid orders of record types. - { - t: []rec{{recMiddle, data[:200]}}, - fail: true, - }, - { - t: []rec{{recLast, data[:200]}}, - fail: true, - }, - { - t: []rec{ - {recFirst, data[:200]}, - {recFull, data[200:400]}, + { + t: []rec{ + {recFirst, data[:100]}, + {recMiddle, data[100:200]}, + {recFull, data[200:400]}, + }, + fail: true, }, - fail: true, - }, - { - t: []rec{ - {recFirst, data[:100]}, - {recMiddle, data[100:200]}, - {recFull, data[200:400]}, + // Non-zero data after page termination. + { + t: []rec{ + {recFull, data[:100]}, + {recPageTerm, append(make([]byte, pageSize-recordHeaderSize-102), 1)}, + }, + exp: [][]byte{data[:100]}, + fail: true, }, - fail: true, - }, - // Non-zero data after page termination. - { - t: []rec{ - {recFull, data[:100]}, - {recPageTerm, append(make([]byte, pageSize-recordHeaderSize-102), 1)}, - }, - exp: [][]byte{data[:100]}, - fail: true, - }, -} + } +) func encodedRecord(t recType, b []byte) []byte { if t == recPageTerm { @@ -279,6 +281,7 @@ type multiReadCloser struct { func (m *multiReadCloser) Read(p []byte) (n int, err error) { return m.reader.Read(p) } + func (m *multiReadCloser) Close() error { return tsdb_errors.NewMulti(tsdb_errors.CloseAll(m.closers)).Err() } @@ -439,7 +442,7 @@ func TestLiveReaderCorrupt_ShortFile(t *testing.T) { err = w.Close() require.NoError(t, err) - segmentFile, err := os.OpenFile(filepath.Join(dir, "00000000"), os.O_RDWR, 0666) + segmentFile, err := os.OpenFile(filepath.Join(dir, "00000000"), os.O_RDWR, 0o666) require.NoError(t, err) err = segmentFile.Truncate(pageSize / 2) @@ -479,7 +482,7 @@ func TestLiveReaderCorrupt_RecordTooLongAndShort(t *testing.T) { err = w.Close() require.NoError(t, err) - segmentFile, err := os.OpenFile(filepath.Join(dir, "00000000"), os.O_RDWR, 0666) + segmentFile, err := os.OpenFile(filepath.Join(dir, "00000000"), os.O_RDWR, 0o666) require.NoError(t, err) // Override the record length diff --git a/tsdb/wal/wal.go b/tsdb/wal/wal.go index 7254fc4c75..872e83a4d5 100644 --- a/tsdb/wal/wal.go +++ b/tsdb/wal/wal.go @@ -118,7 +118,7 @@ func (e *CorruptionErr) Error() string { // OpenWriteSegment opens segment k in dir. The returned segment is ready for new appends. 
func OpenWriteSegment(logger log.Logger, dir string, k int) (*Segment, error) { segName := SegmentName(dir, k) - f, err := os.OpenFile(segName, os.O_WRONLY|os.O_APPEND, 0666) + f, err := os.OpenFile(segName, os.O_WRONLY|os.O_APPEND, 0o666) if err != nil { return nil, err } @@ -144,7 +144,7 @@ func OpenWriteSegment(logger log.Logger, dir string, k int) (*Segment, error) { // CreateSegment creates a new segment k in dir. func CreateSegment(dir string, k int) (*Segment, error) { - f, err := os.OpenFile(SegmentName(dir, k), os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0666) + f, err := os.OpenFile(SegmentName(dir, k), os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0o666) if err != nil { return nil, err } @@ -260,7 +260,7 @@ func NewSize(logger log.Logger, reg prometheus.Registerer, dir string, segmentSi if segmentSize%pageSize != 0 { return nil, errors.New("invalid segment size") } - if err := os.MkdirAll(dir, 0777); err != nil { + if err := os.MkdirAll(dir, 0o777); err != nil { return nil, errors.Wrap(err, "create dir") } if logger == nil { diff --git a/tsdb/wal/wal_test.go b/tsdb/wal/wal_test.go index d73a94fbbf..55cd6caa1b 100644 --- a/tsdb/wal/wal_test.go +++ b/tsdb/wal/wal_test.go @@ -141,7 +141,7 @@ func TestWALRepair_ReadingError(t *testing.T) { require.NoError(t, w.Close()) - f, err := os.OpenFile(SegmentName(dir, test.corrSgm), os.O_RDWR, 0666) + f, err := os.OpenFile(SegmentName(dir, test.corrSgm), os.O_RDWR, 0o666) require.NoError(t, err) // Apply corruption function. @@ -167,7 +167,7 @@ func TestWALRepair_ReadingError(t *testing.T) { for r.Next() { } - //Close the segment so we don't break things on Windows. + // Close the segment so we don't break things on Windows. s.Close() // No corruption in this segment. @@ -244,7 +244,7 @@ func TestCorruptAndCarryOn(t *testing.T) { segments, err := listSegments(dir) require.NoError(t, err) for _, segment := range segments { - f, err := os.OpenFile(filepath.Join(dir, fmt.Sprintf("%08d", segment.index)), os.O_RDONLY, 0666) + f, err := os.OpenFile(filepath.Join(dir, fmt.Sprintf("%08d", segment.index)), os.O_RDONLY, 0o666) require.NoError(t, err) fi, err := f.Stat() @@ -261,7 +261,7 @@ func TestCorruptAndCarryOn(t *testing.T) { // Truncate the first file, splitting the middle record in the second // page in half, leaving 4 valid records. { - f, err := os.OpenFile(filepath.Join(dir, fmt.Sprintf("%08d", 0)), os.O_RDWR, 0666) + f, err := os.OpenFile(filepath.Join(dir, fmt.Sprintf("%08d", 0)), os.O_RDWR, 0o666) require.NoError(t, err) fi, err := f.Stat() diff --git a/tsdb/wal/watcher_test.go b/tsdb/wal/watcher_test.go index a1786f823d..58800431b5 100644 --- a/tsdb/wal/watcher_test.go +++ b/tsdb/wal/watcher_test.go @@ -29,9 +29,11 @@ import ( "github.com/prometheus/prometheus/tsdb/record" ) -var defaultRetryInterval = 100 * time.Millisecond -var defaultRetries = 100 -var wMetrics = NewWatcherMetrics(prometheus.DefaultRegisterer) +var ( + defaultRetryInterval = 100 * time.Millisecond + defaultRetries = 100 + wMetrics = NewWatcherMetrics(prometheus.DefaultRegisterer) +) // retry executes f() n times at each interval until it returns true. 
func retry(t *testing.T, interval time.Duration, n int, f func() bool) { @@ -112,7 +114,7 @@ func TestTailSamples(t *testing.T) { dir := t.TempDir() wdir := path.Join(dir, "wal") - err := os.Mkdir(wdir, 0777) + err := os.Mkdir(wdir, 0o777) require.NoError(t, err) enc := record.Encoder{} @@ -201,7 +203,7 @@ func TestReadToEndNoCheckpoint(t *testing.T) { t.Run(fmt.Sprintf("compress=%t", compress), func(t *testing.T) { dir := t.TempDir() wdir := path.Join(dir, "wal") - err := os.Mkdir(wdir, 0777) + err := os.Mkdir(wdir, 0o777) require.NoError(t, err) w, err := NewSize(nil, nil, wdir, 128*pageSize, compress) @@ -271,7 +273,7 @@ func TestReadToEndWithCheckpoint(t *testing.T) { dir := t.TempDir() wdir := path.Join(dir, "wal") - err := os.Mkdir(wdir, 0777) + err := os.Mkdir(wdir, 0o777) require.NoError(t, err) enc := record.Encoder{} @@ -358,7 +360,7 @@ func TestReadCheckpoint(t *testing.T) { dir := t.TempDir() wdir := path.Join(dir, "wal") - err := os.Mkdir(wdir, 0777) + err := os.Mkdir(wdir, 0o777) require.NoError(t, err) os.Create(SegmentName(wdir, 30)) @@ -426,7 +428,7 @@ func TestReadCheckpointMultipleSegments(t *testing.T) { dir := t.TempDir() wdir := path.Join(dir, "wal") - err := os.Mkdir(wdir, 0777) + err := os.Mkdir(wdir, 0o777) require.NoError(t, err) enc := record.Encoder{} @@ -462,7 +464,7 @@ func TestReadCheckpointMultipleSegments(t *testing.T) { // At this point we should have at least 6 segments, lets create a checkpoint dir of the first 5. checkpointDir := dir + "/wal/checkpoint.000004" - err = os.Mkdir(checkpointDir, 0777) + err = os.Mkdir(checkpointDir, 0o777) require.NoError(t, err) for i := 0; i <= 4; i++ { err := os.Rename(SegmentName(dir+"/wal", i), SegmentName(checkpointDir, i)) @@ -504,7 +506,7 @@ func TestCheckpointSeriesReset(t *testing.T) { dir := t.TempDir() wdir := path.Join(dir, "wal") - err := os.Mkdir(wdir, 0777) + err := os.Mkdir(wdir, 0o777) require.NoError(t, err) enc := record.Encoder{} diff --git a/tsdb/wal_test.go b/tsdb/wal_test.go index fdcd6ce52d..ba8694b7a3 100644 --- a/tsdb/wal_test.go +++ b/tsdb/wal_test.go @@ -326,7 +326,7 @@ func TestWALRestoreCorrupted(t *testing.T) { { name: "truncate_checksum", f: func(t *testing.T, w *SegmentWAL) { - f, err := os.OpenFile(w.files[0].Name(), os.O_WRONLY, 0666) + f, err := os.OpenFile(w.files[0].Name(), os.O_WRONLY, 0o666) require.NoError(t, err) defer f.Close() @@ -339,7 +339,7 @@ func TestWALRestoreCorrupted(t *testing.T) { { name: "truncate_body", f: func(t *testing.T, w *SegmentWAL) { - f, err := os.OpenFile(w.files[0].Name(), os.O_WRONLY, 0666) + f, err := os.OpenFile(w.files[0].Name(), os.O_WRONLY, 0o666) require.NoError(t, err) defer f.Close() @@ -352,7 +352,7 @@ func TestWALRestoreCorrupted(t *testing.T) { { name: "body_content", f: func(t *testing.T, w *SegmentWAL) { - f, err := os.OpenFile(w.files[0].Name(), os.O_WRONLY, 0666) + f, err := os.OpenFile(w.files[0].Name(), os.O_WRONLY, 0o666) require.NoError(t, err) defer f.Close() @@ -367,7 +367,7 @@ func TestWALRestoreCorrupted(t *testing.T) { { name: "checksum", f: func(t *testing.T, w *SegmentWAL) { - f, err := os.OpenFile(w.files[0].Name(), os.O_WRONLY, 0666) + f, err := os.OpenFile(w.files[0].Name(), os.O_WRONLY, 0o666) require.NoError(t, err) defer f.Close() diff --git a/util/osutil/hostname.go b/util/osutil/hostname.go index 224dffe7c4..c44cb391b6 100644 --- a/util/osutil/hostname.go +++ b/util/osutil/hostname.go @@ -49,14 +49,12 @@ func GetFQDN() (string, error) { if fqdn, err := lookup(ip); err == nil { return fqdn, nil } - } if ip := addr.To16(); 
ip != nil { if fqdn, err := lookup(ip); err == nil { return fqdn, nil } - } } return hostname, nil diff --git a/util/strutil/strconv.go b/util/strutil/strconv.go index 3d96e4faf6..eed0134abd 100644 --- a/util/strutil/strconv.go +++ b/util/strutil/strconv.go @@ -19,9 +19,7 @@ import ( "regexp" ) -var ( - invalidLabelCharRE = regexp.MustCompile(`[^a-zA-Z0-9_]`) -) +var invalidLabelCharRE = regexp.MustCompile(`[^a-zA-Z0-9_]`) // TableLinkForExpression creates an escaped relative link to the table view of // the provided expression. diff --git a/util/testutil/roundtrip.go b/util/testutil/roundtrip.go index 996d11f368..a93991a13e 100644 --- a/util/testutil/roundtrip.go +++ b/util/testutil/roundtrip.go @@ -43,5 +43,7 @@ func NewRoundTripCheckRequest(checkRequest func(*http.Request), theResponse *htt checkRequest: checkRequest, roundTrip: roundTrip{ theResponse: theResponse, - theError: theError}} + theError: theError, + }, + } } diff --git a/web/api/v1/api.go b/web/api/v1/api.go index 4d85a56885..dc9c01d9a9 100644 --- a/web/api/v1/api.go +++ b/web/api/v1/api.go @@ -75,9 +75,7 @@ const ( errorNotFound errorType = "not_found" ) -var ( - LocalhostRepresentations = []string{"127.0.0.1", "localhost", "::1"} -) +var LocalhostRepresentations = []string{"127.0.0.1", "localhost", "::1"} type apiError struct { typ errorType @@ -1453,7 +1451,7 @@ func (api *API) snapshot(r *http.Request) apiFuncResult { rand.Int63()) dir = filepath.Join(snapdir, name) ) - if err := os.MkdirAll(dir, 0777); err != nil { + if err := os.MkdirAll(dir, 0o777); err != nil { return apiFuncResult{nil, &apiError{errorInternal, errors.Wrap(err, "create snapshot directory")}, nil, nil} } if err := api.db.Snapshot(dir, !skipHead); err != nil { @@ -1509,7 +1507,6 @@ func (api *API) respondError(w http.ResponseWriter, apiErr *apiError, data inter Error: apiErr.err.Error(), Data: data, }) - if err != nil { level.Error(api.logger).Log("msg", "error marshaling json response", "err", err) http.Error(w, err.Error(), http.StatusInternalServerError) @@ -1652,7 +1649,7 @@ func marshalExemplarJSON(ptr unsafe.Pointer, stream *jsoniter.Stream) { stream.WriteMore() stream.WriteObjectField(`timestamp`) marshalTimestamp(p.Ts, stream) - //marshalTimestamp(p.Ts, stream) + // marshalTimestamp(p.Ts, stream) stream.WriteObjectEnd() } diff --git a/web/api/v1/api_test.go b/web/api/v1/api_test.go index b839e61cd5..a04a3d8dbc 100644 --- a/web/api/v1/api_test.go +++ b/web/api/v1/api_test.go @@ -126,9 +126,7 @@ func newTestTargetRetriever(targetsInfo []*testTargetParams) *testTargetRetrieve } } -var ( - scrapeStart = time.Now().Add(-11 * time.Second) -) +var scrapeStart = time.Now().Add(-11 * time.Second) func (t testTargetRetriever) TargetsActive() map[string][]*scrape.Target { return t.activeTargets @@ -452,7 +450,6 @@ func TestEndpoints(t *testing.T) { testEndpoints(t, api, testTargetRetriever, suite.ExemplarStorage(), false) }) - } func TestLabelNames(t *testing.T) { @@ -651,7 +648,7 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E exemplars []exemplar.QueryResult } - var tests = []test{ + tests := []test{ { endpoint: api.query, query: url.Values{ @@ -2137,7 +2134,7 @@ func assertAPIError(t *testing.T, got *apiError, exp errorType) { } } -func assertAPIResponse(t *testing.T, got interface{}, exp interface{}) { +func assertAPIResponse(t *testing.T, got, exp interface{}) { t.Helper() require.Equal(t, exp, got) @@ -2179,6 +2176,7 @@ func (f *fakeDB) Stats(statsByLabelName string) (_ *tsdb.Stats, retErr error) { h, _ := 
tsdb.NewHead(nil, nil, nil, opts, nil) return h.Stats(statsByLabelName), nil } + func (f *fakeDB) WALReplayStatus() (tsdb.WALReplayStatus, error) { return tsdb.WALReplayStatus{}, nil } @@ -2449,7 +2447,7 @@ func TestParseTimeParam(t *testing.T) { ts, err := parseTime("1582468023986") require.NoError(t, err) - var tests = []struct { + tests := []struct { paramName string paramValue string defaultValue time.Time @@ -2508,7 +2506,7 @@ func TestParseTime(t *testing.T) { panic(err) } - var tests = []struct { + tests := []struct { input string fail bool result time.Time @@ -2516,25 +2514,32 @@ func TestParseTime(t *testing.T) { { input: "", fail: true, - }, { + }, + { input: "abc", fail: true, - }, { + }, + { input: "30s", fail: true, - }, { + }, + { input: "123", result: time.Unix(123, 0), - }, { + }, + { input: "123.123", result: time.Unix(123, 123000000), - }, { + }, + { input: "2015-06-03T13:21:58.555Z", result: ts, - }, { + }, + { input: "2015-06-03T14:21:58.555+01:00", result: ts, - }, { + }, + { // Test float rounding. input: "1543578564.705", result: time.Unix(1543578564, 705*1e6), @@ -2566,7 +2571,7 @@ func TestParseTime(t *testing.T) { } func TestParseDuration(t *testing.T) { - var tests = []struct { + tests := []struct { input string fail bool result time.Duration diff --git a/web/web.go b/web/web.go index 269abd0bf0..f4e5fd65cb 100644 --- a/web/web.go +++ b/web/web.go @@ -607,7 +607,6 @@ func (h *Handler) Run(ctx context.Context, listener net.Listener, webConfig stri } func (h *Handler) alerts(w http.ResponseWriter, r *http.Request) { - var groups []*rules.Group for _, group := range h.ruleManager.RuleGroups() { if group.HasAlertingRules() { From b7bdf6fab224e0a1126454b62bb30246acc2d2e0 Mon Sep 17 00:00:00 2001 From: Mateusz Gozdek Date: Fri, 22 Oct 2021 10:19:38 +0200 Subject: [PATCH 051/161] Fix imports formatting According to https://github.com/prometheus/prometheus/commit/282990880615ab698fe9583b21ae52e19c333f46#r58457095. 
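For illustration only, this hypothetical file (not one of the diffs below)
shows the resulting convention: imports grouped into standard library,
third-party modules, and github.com/prometheus/prometheus packages last,
separated by blank lines:

    package example

    import (
        "fmt"

        "github.com/pkg/errors"

        "github.com/prometheus/prometheus/pkg/labels"
    )

    func describe(ls labels.Labels) error {
        if len(ls) == 0 {
            return errors.New("empty label set")
        }
        fmt.Println(ls.String())
        return nil
    }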
Signed-off-by: Mateusz Gozdek --- cmd/promtool/backfill.go | 1 + cmd/promtool/backfill_test.go | 3 ++- cmd/promtool/main_test.go | 3 ++- cmd/promtool/rules.go | 1 + cmd/promtool/rules_test.go | 3 ++- cmd/promtool/sd_test.go | 1 + cmd/promtool/tsdb.go | 3 +-- discovery/http/http_test.go | 3 ++- discovery/kubernetes/endpoints.go | 3 +-- discovery/moby/docker.go | 1 + discovery/moby/network.go | 1 + discovery/puppetdb/puppetdb_test.go | 3 ++- discovery/puppetdb/resources.go | 1 + discovery/scaleway/baremetal.go | 5 +++-- discovery/scaleway/instance.go | 5 +++-- discovery/scaleway/scaleway.go | 3 ++- promql/parser/printer_test.go | 4 ++-- storage/remote/metadata_watcher.go | 1 + storage/remote/metadata_watcher_test.go | 3 ++- storage/remote/queue_manager.go | 4 ++-- storage/remote/read_handler.go | 2 +- tsdb/agent/db.go | 3 ++- tsdb/agent/db_test.go | 3 ++- tsdb/exemplar.go | 1 + tsdb/exemplar_test.go | 2 +- tsdb/head_wal.go | 11 +++++------ 26 files changed, 45 insertions(+), 29 deletions(-) diff --git a/cmd/promtool/backfill.go b/cmd/promtool/backfill.go index b626743dea..6cd8fd1f3a 100644 --- a/cmd/promtool/backfill.go +++ b/cmd/promtool/backfill.go @@ -21,6 +21,7 @@ import ( "github.com/go-kit/log" "github.com/pkg/errors" + "github.com/prometheus/prometheus/pkg/labels" "github.com/prometheus/prometheus/pkg/textparse" "github.com/prometheus/prometheus/tsdb" diff --git a/cmd/promtool/backfill_test.go b/cmd/promtool/backfill_test.go index 4bbb5602c9..932d00cca0 100644 --- a/cmd/promtool/backfill_test.go +++ b/cmd/promtool/backfill_test.go @@ -22,10 +22,11 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/prometheus/prometheus/pkg/labels" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb" - "github.com/stretchr/testify/require" ) type backfillSample struct { diff --git a/cmd/promtool/main_test.go b/cmd/promtool/main_test.go index 138548fc75..0c2eb26d72 100644 --- a/cmd/promtool/main_test.go +++ b/cmd/promtool/main_test.go @@ -21,9 +21,10 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/prometheus/prometheus/pkg/labels" "github.com/prometheus/prometheus/pkg/rulefmt" - "github.com/stretchr/testify/require" ) func TestQueryRange(t *testing.T) { diff --git a/cmd/promtool/rules.go b/cmd/promtool/rules.go index ff98e80d8a..e05f5ca7ab 100644 --- a/cmd/promtool/rules.go +++ b/cmd/promtool/rules.go @@ -23,6 +23,7 @@ import ( "github.com/pkg/errors" v1 "github.com/prometheus/client_golang/api/prometheus/v1" "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/pkg/labels" "github.com/prometheus/prometheus/pkg/timestamp" "github.com/prometheus/prometheus/rules" diff --git a/cmd/promtool/rules_test.go b/cmd/promtool/rules_test.go index 868dff984f..6e51785edd 100644 --- a/cmd/promtool/rules_test.go +++ b/cmd/promtool/rules_test.go @@ -25,9 +25,10 @@ import ( "github.com/go-kit/log" v1 "github.com/prometheus/client_golang/api/prometheus/v1" "github.com/prometheus/common/model" + "github.com/stretchr/testify/require" + "github.com/prometheus/prometheus/pkg/labels" "github.com/prometheus/prometheus/tsdb" - "github.com/stretchr/testify/require" ) type mockQueryRangeAPI struct { diff --git a/cmd/promtool/sd_test.go b/cmd/promtool/sd_test.go index d9d9744812..793b8ebb38 100644 --- a/cmd/promtool/sd_test.go +++ b/cmd/promtool/sd_test.go @@ -17,6 +17,7 @@ import ( "testing" "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/config" 
"github.com/prometheus/prometheus/discovery/targetgroup" "github.com/prometheus/prometheus/pkg/labels" diff --git a/cmd/promtool/tsdb.go b/cmd/promtool/tsdb.go index 5e77802a98..6318336ce5 100644 --- a/cmd/promtool/tsdb.go +++ b/cmd/promtool/tsdb.go @@ -31,8 +31,6 @@ import ( "text/tabwriter" "time" - "github.com/prometheus/prometheus/tsdb/index" - "github.com/alecthomas/units" "github.com/go-kit/log" "github.com/pkg/errors" @@ -42,6 +40,7 @@ import ( "github.com/prometheus/prometheus/tsdb/chunks" tsdb_errors "github.com/prometheus/prometheus/tsdb/errors" "github.com/prometheus/prometheus/tsdb/fileutil" + "github.com/prometheus/prometheus/tsdb/index" ) const timeDelta = 30000 diff --git a/discovery/http/http_test.go b/discovery/http/http_test.go index b7a0369e70..896eec1beb 100644 --- a/discovery/http/http_test.go +++ b/discovery/http/http_test.go @@ -24,8 +24,9 @@ import ( "github.com/go-kit/log" "github.com/prometheus/common/config" "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/discovery/targetgroup" "github.com/stretchr/testify/require" + + "github.com/prometheus/prometheus/discovery/targetgroup" ) func TestHTTPValidRefresh(t *testing.T) { diff --git a/discovery/kubernetes/endpoints.go b/discovery/kubernetes/endpoints.go index 49e515a14e..510b33f448 100644 --- a/discovery/kubernetes/endpoints.go +++ b/discovery/kubernetes/endpoints.go @@ -18,8 +18,6 @@ import ( "net" "strconv" - "github.com/prometheus/prometheus/util/strutil" - "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/pkg/errors" @@ -29,6 +27,7 @@ import ( "k8s.io/client-go/util/workqueue" "github.com/prometheus/prometheus/discovery/targetgroup" + "github.com/prometheus/prometheus/util/strutil" ) var ( diff --git a/discovery/moby/docker.go b/discovery/moby/docker.go index deabcdd1e2..162833ece4 100644 --- a/discovery/moby/docker.go +++ b/discovery/moby/docker.go @@ -28,6 +28,7 @@ import ( "github.com/go-kit/log" "github.com/prometheus/common/config" "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/discovery/refresh" "github.com/prometheus/prometheus/discovery/targetgroup" diff --git a/discovery/moby/network.go b/discovery/moby/network.go index 3982e57776..0e0d0041de 100644 --- a/discovery/moby/network.go +++ b/discovery/moby/network.go @@ -19,6 +19,7 @@ import ( "github.com/docker/docker/api/types" "github.com/docker/docker/client" + "github.com/prometheus/prometheus/util/strutil" ) diff --git a/discovery/puppetdb/puppetdb_test.go b/discovery/puppetdb/puppetdb_test.go index 3fcfab549c..25340bea77 100644 --- a/discovery/puppetdb/puppetdb_test.go +++ b/discovery/puppetdb/puppetdb_test.go @@ -25,8 +25,9 @@ import ( "github.com/go-kit/log" "github.com/prometheus/common/config" "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/discovery/targetgroup" "github.com/stretchr/testify/require" + + "github.com/prometheus/prometheus/discovery/targetgroup" ) func mockServer(t *testing.T) *httptest.Server { diff --git a/discovery/puppetdb/resources.go b/discovery/puppetdb/resources.go index 64b3a781e6..27792b6460 100644 --- a/discovery/puppetdb/resources.go +++ b/discovery/puppetdb/resources.go @@ -18,6 +18,7 @@ import ( "strings" "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/util/strutil" ) diff --git a/discovery/scaleway/baremetal.go b/discovery/scaleway/baremetal.go index 9e002b9878..c313e6695d 100644 --- a/discovery/scaleway/baremetal.go +++ b/discovery/scaleway/baremetal.go @@ 
-25,10 +25,11 @@ import ( "github.com/prometheus/common/config" "github.com/prometheus/common/model" "github.com/prometheus/common/version" - "github.com/prometheus/prometheus/discovery/refresh" - "github.com/prometheus/prometheus/discovery/targetgroup" "github.com/scaleway/scaleway-sdk-go/api/baremetal/v1" "github.com/scaleway/scaleway-sdk-go/scw" + + "github.com/prometheus/prometheus/discovery/refresh" + "github.com/prometheus/prometheus/discovery/targetgroup" ) type baremetalDiscovery struct { diff --git a/discovery/scaleway/instance.go b/discovery/scaleway/instance.go index a78c5e93ca..67311216d0 100644 --- a/discovery/scaleway/instance.go +++ b/discovery/scaleway/instance.go @@ -25,10 +25,11 @@ import ( "github.com/prometheus/common/config" "github.com/prometheus/common/model" "github.com/prometheus/common/version" - "github.com/prometheus/prometheus/discovery/refresh" - "github.com/prometheus/prometheus/discovery/targetgroup" "github.com/scaleway/scaleway-sdk-go/api/instance/v1" "github.com/scaleway/scaleway-sdk-go/scw" + + "github.com/prometheus/prometheus/discovery/refresh" + "github.com/prometheus/prometheus/discovery/targetgroup" ) const ( diff --git a/discovery/scaleway/scaleway.go b/discovery/scaleway/scaleway.go index 920f087a1c..ed3d7f3917 100644 --- a/discovery/scaleway/scaleway.go +++ b/discovery/scaleway/scaleway.go @@ -24,10 +24,11 @@ import ( "github.com/pkg/errors" "github.com/prometheus/common/config" "github.com/prometheus/common/model" + "github.com/scaleway/scaleway-sdk-go/scw" + "github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/discovery/refresh" "github.com/prometheus/prometheus/discovery/targetgroup" - "github.com/scaleway/scaleway-sdk-go/scw" ) // metaLabelPrefix is the meta prefix used for all meta labels. 
diff --git a/promql/parser/printer_test.go b/promql/parser/printer_test.go index e687820c92..2a7e438a22 100644 --- a/promql/parser/printer_test.go +++ b/promql/parser/printer_test.go @@ -16,9 +16,9 @@ package parser import ( "testing" - "github.com/prometheus/prometheus/pkg/labels" - "github.com/stretchr/testify/require" + + "github.com/prometheus/prometheus/pkg/labels" ) func TestExprString(t *testing.T) { diff --git a/storage/remote/metadata_watcher.go b/storage/remote/metadata_watcher.go index 1096e406ad..eee36463b1 100644 --- a/storage/remote/metadata_watcher.go +++ b/storage/remote/metadata_watcher.go @@ -21,6 +21,7 @@ import ( "github.com/go-kit/log/level" "github.com/pkg/errors" "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/scrape" ) diff --git a/storage/remote/metadata_watcher_test.go b/storage/remote/metadata_watcher_test.go index c8114fbd65..d51011a8cb 100644 --- a/storage/remote/metadata_watcher_test.go +++ b/storage/remote/metadata_watcher_test.go @@ -20,9 +20,10 @@ import ( "github.com/pkg/errors" "github.com/prometheus/common/model" + "github.com/stretchr/testify/require" + "github.com/prometheus/prometheus/pkg/textparse" "github.com/prometheus/prometheus/scrape" - "github.com/stretchr/testify/require" ) var ( diff --git a/storage/remote/queue_manager.go b/storage/remote/queue_manager.go index 8664555a5d..d76b634051 100644 --- a/storage/remote/queue_manager.go +++ b/storage/remote/queue_manager.go @@ -26,10 +26,10 @@ import ( "github.com/golang/snappy" "github.com/opentracing/opentracing-go" "github.com/opentracing/opentracing-go/ext" - "go.uber.org/atomic" - "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" + "go.uber.org/atomic" + "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/pkg/labels" "github.com/prometheus/prometheus/pkg/relabel" diff --git a/storage/remote/read_handler.go b/storage/remote/read_handler.go index ceea4579af..581b54a521 100644 --- a/storage/remote/read_handler.go +++ b/storage/remote/read_handler.go @@ -20,8 +20,8 @@ import ( "github.com/go-kit/log" "github.com/go-kit/log/level" - "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/pkg/gate" "github.com/prometheus/prometheus/pkg/labels" diff --git a/tsdb/agent/db.go b/tsdb/agent/db.go index 30d7793559..36e843e838 100644 --- a/tsdb/agent/db.go +++ b/tsdb/agent/db.go @@ -25,6 +25,8 @@ import ( "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" + "go.uber.org/atomic" + "github.com/prometheus/prometheus/pkg/exemplar" "github.com/prometheus/prometheus/pkg/labels" "github.com/prometheus/prometheus/pkg/timestamp" @@ -33,7 +35,6 @@ import ( "github.com/prometheus/prometheus/tsdb" "github.com/prometheus/prometheus/tsdb/record" "github.com/prometheus/prometheus/tsdb/wal" - "go.uber.org/atomic" ) var ErrUnsupported = errors.New("unsupported operation with WAL-only storage") diff --git a/tsdb/agent/db_test.go b/tsdb/agent/db_test.go index 8f6cdc68ab..464caa75d3 100644 --- a/tsdb/agent/db_test.go +++ b/tsdb/agent/db_test.go @@ -29,12 +29,13 @@ import ( "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" + "github.com/stretchr/testify/require" + "github.com/prometheus/prometheus/pkg/labels" "github.com/prometheus/prometheus/storage/remote" "github.com/prometheus/prometheus/tsdb/record" "github.com/prometheus/prometheus/tsdb/tsdbutil" 
"github.com/prometheus/prometheus/tsdb/wal" - "github.com/stretchr/testify/require" ) func TestUnsupported(t *testing.T) { diff --git a/tsdb/exemplar.go b/tsdb/exemplar.go index 63ac512c91..53cb5c9d9e 100644 --- a/tsdb/exemplar.go +++ b/tsdb/exemplar.go @@ -20,6 +20,7 @@ import ( "unicode/utf8" "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/pkg/exemplar" "github.com/prometheus/prometheus/pkg/labels" diff --git a/tsdb/exemplar_test.go b/tsdb/exemplar_test.go index db47780fdd..7876d95634 100644 --- a/tsdb/exemplar_test.go +++ b/tsdb/exemplar_test.go @@ -22,9 +22,9 @@ import ( "strings" "testing" + "github.com/prometheus/client_golang/prometheus" "github.com/stretchr/testify/require" - "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/prometheus/pkg/exemplar" "github.com/prometheus/prometheus/pkg/labels" "github.com/prometheus/prometheus/storage" diff --git a/tsdb/head_wal.go b/tsdb/head_wal.go index 3f6416e171..4fa030a49d 100644 --- a/tsdb/head_wal.go +++ b/tsdb/head_wal.go @@ -25,18 +25,17 @@ import ( "sync" "time" - "github.com/prometheus/prometheus/pkg/labels" - "github.com/prometheus/prometheus/tsdb/chunkenc" - "github.com/prometheus/prometheus/tsdb/encoding" - tsdb_errors "github.com/prometheus/prometheus/tsdb/errors" - "github.com/prometheus/prometheus/tsdb/fileutil" - "github.com/go-kit/log/level" "github.com/pkg/errors" "go.uber.org/atomic" "github.com/prometheus/prometheus/pkg/exemplar" + "github.com/prometheus/prometheus/pkg/labels" "github.com/prometheus/prometheus/storage" + "github.com/prometheus/prometheus/tsdb/chunkenc" + "github.com/prometheus/prometheus/tsdb/encoding" + tsdb_errors "github.com/prometheus/prometheus/tsdb/errors" + "github.com/prometheus/prometheus/tsdb/fileutil" "github.com/prometheus/prometheus/tsdb/record" "github.com/prometheus/prometheus/tsdb/tombstones" "github.com/prometheus/prometheus/tsdb/wal" From ce658835880a4baf3ee5327bf58a2b641aa3e56a Mon Sep 17 00:00:00 2001 From: Mateusz Gozdek Date: Fri, 22 Oct 2021 10:26:56 +0200 Subject: [PATCH 052/161] .golangci.yml: don't lint autogenerated files So when we enable linters for formatting, they do not complain about those files. Refs #9557 Signed-off-by: Mateusz Gozdek --- .golangci.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.golangci.yml b/.golangci.yml index f9d72c01e2..bc5d5c8d14 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -1,5 +1,8 @@ run: deadline: 5m + skip-files: + # Skip autogenerated files. + - ^.*\.(pb|y)\.go$ linters: enable: From 01c55822169a9123367e333dc407edd8a263f75d Mon Sep 17 00:00:00 2001 From: Mateusz Gozdek Date: Fri, 22 Oct 2021 10:27:37 +0200 Subject: [PATCH 053/161] .golangci.yml: enable gofumpt and goimports linters For imports and more opinionated code formatting. 
Closes #9557

Signed-off-by: Mateusz Gozdek
---
 .golangci.yml | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/.golangci.yml b/.golangci.yml
index bc5d5c8d14..90ba281570 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -7,6 +7,8 @@ run:
 linters:
   enable:
     - depguard
+    - gofumpt
+    - goimports
     - revive
 
 issues:
@@ -28,3 +30,7 @@ linters-settings:
       - github.com/go-kit/kit/log: "Use github.com/go-kit/log instead of github.com/go-kit/kit/log"
   errcheck:
     exclude: scripts/errcheck_excludes.txt
+  goimports:
+    local-prefixes: github.com/prometheus/prometheus
+  gofumpt:
+    extra-rules: true

From c3beca72e24cf441fe0927740a47c5df50d7f953 Mon Sep 17 00:00:00 2001
From: Mateusz Gozdek
Date: Fri, 15 Oct 2021 19:56:18 +0200
Subject: [PATCH 054/161] cmd/prometheus: wait for Prometheus to shut down in
 tests

So the temporary data directory can be successfully removed: on Windows, a
directory cannot be removed while it is still in use.

Signed-off-by: Mateusz Gozdek
---
 cmd/prometheus/main_test.go | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/cmd/prometheus/main_test.go b/cmd/prometheus/main_test.go
index e6c8fcc07c..94196a986b 100644
--- a/cmd/prometheus/main_test.go
+++ b/cmd/prometheus/main_test.go
@@ -226,6 +226,7 @@ func TestWALSegmentSizeBounds(t *testing.T) {
 				t.Errorf("prometheus should be still running: %v", err)
 			case <-time.After(5 * time.Second):
 				prom.Process.Kill()
+				<-done
 			}
 			continue
 		}
@@ -268,6 +269,7 @@ func TestMaxBlockChunkSegmentSizeBounds(t *testing.T) {
 				t.Errorf("prometheus should be still running: %v", err)
 			case <-time.After(5 * time.Second):
 				prom.Process.Kill()
+				<-done
 			}
 			continue
 		}
@@ -438,6 +440,7 @@ func TestModeSpecificFlags(t *testing.T) {
 				t.Errorf("prometheus should be still running: %v", err)
 			case <-time.After(5 * time.Second):
 				prom.Process.Kill()
+				<-done
 			}
 			return
 		}

From ea924746b305d9ce1db01ec67d2b7b8ee773b029 Mon Sep 17 00:00:00 2001
From: Mateusz Gozdek
Date: Tue, 2 Nov 2021 22:17:32 +0100
Subject: [PATCH 055/161] discovery/kubernetes: improve test logic for waiting
 for discoverers (#9584)

When running tests in parallel, 10 milliseconds may not be enough for all
discoverers to register, which makes the test flaky.

This commit changes the waiting logic to wait for the number of discoverers
to stop increasing during a given time frame, which should be large enough
for a single discoverer to register in the test environment.

The following run passes with this commit:

go test -failfast -race -count 100 -v ./discovery/kubernetes/

Signed-off-by: Mateusz Gozdek
---
 discovery/kubernetes/kubernetes_test.go | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/discovery/kubernetes/kubernetes_test.go b/discovery/kubernetes/kubernetes_test.go
index 2da8ec546c..416e41adda 100644
--- a/discovery/kubernetes/kubernetes_test.go
+++ b/discovery/kubernetes/kubernetes_test.go
@@ -86,15 +86,18 @@ func (d k8sDiscoveryTest) Run(t *testing.T) {
 	// Ensure that discovery has a discoverer set. This prevents a race
 	// condition where the above go routine may or may not have set a
 	// discoverer yet.
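// Illustrative aside (a hypothetical helper, not part of this diff),
// restating the strategy from the commit message in isolation: accept a
// count only once it is non-zero and has stopped increasing between two
// consecutive polls.
package sketch

import "time"

func waitUntilStable(count func() int, interval time.Duration) int {
	last := 0
	for {
		n := count()
		if n > 0 && n == last {
			return n
		}
		last = n
		time.Sleep(interval)
	}
}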
+	lastDiscoverersCount := 0
+	dis := d.discovery.(*Discovery)
 	for {
-		dis := d.discovery.(*Discovery)
 		dis.RLock()
 		l := len(dis.discoverers)
 		dis.RUnlock()
-		if l > 0 {
+		if l > 0 && l == lastDiscoverersCount {
 			break
 		}
-		time.Sleep(10 * time.Millisecond)
+		time.Sleep(100 * time.Millisecond)
+
+		lastDiscoverersCount = l
 	}
 
 	resChan := make(chan map[string]*targetgroup.Group)

From b40e254f25906edbe365b59daa5f1ee26569b4dc Mon Sep 17 00:00:00 2001
From: Julien Pivotto
Date: Wed, 3 Nov 2021 04:47:14 +0100
Subject: [PATCH 056/161] Agent: Add a boolean to the index to indicate agent
 mode. (#9649)

I would like to avoid extra API calls to determine if we are running in
Agent Mode, so I think we could use this approach.

This is a bootstrap of #9612

Signed-off-by: Julien Pivotto
---
 web/ui/react-app/public/index.html | 9 ++++++---
 web/web.go                         | 2 ++
 2 files changed, 8 insertions(+), 3 deletions(-)

diff --git a/web/ui/react-app/public/index.html b/web/ui/react-app/public/index.html
index 2671dc6cd8..eac493853c 100755
--- a/web/ui/react-app/public/index.html
+++ b/web/ui/react-app/public/index.html
@@ -10,12 +10,15 @@