diff --git a/.golangci.yml b/.golangci.yml index 6e46812c8f..1f31597c90 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -18,6 +18,7 @@ linters: - gofumpt - goimports - misspell + - nolintlint - predeclared - revive - unconvert @@ -32,15 +33,9 @@ issues: - path: _test.go linters: - errcheck - - path: discovery/ - linters: - - errorlint - path: scrape/ linters: - errorlint - - path: storage/ - linters: - - errorlint - path: tsdb/ linters: - errorlint diff --git a/CHANGELOG.md b/CHANGELOG.md index 7ad9c69dc9..bc14091cde 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,40 @@ # Changelog +## 2.48.0-rc.1 / 2023-10-24 + +* [BUGFIX] PromQL: Reduce inefficiency introduced by warnings/annotations and temporarily remove possible non-counter warnings. #13012 + +## 2.48.0-rc.0 / 2023-10-10 + +* [CHANGE] Remote-write: respect Retry-After header on 5xx errors. #12677 +* [FEATURE] Alerting: Add AWS SigV4 authentication support for Alertmanager endpoints. #12774 +* [FEATURE] Promtool: Add support for histograms in the TSDB dump command. #12775 +* [FEATURE] PromQL: Add warnings (and annotations) to PromQL query results. #12152 #12982 #12988 +* [FEATURE] Remote-write: Add Azure AD OAuth authentication support for remote write requests. #12572 +* [ENHANCEMENT] Remote-write: Add a header to count retried remote write requests. #12729 +* [ENHANCEMENT] TSDB: Improve query performance by re-using iterator when moving between series. #12757 +* [ENHANCEMENT] UI: Move /targets page discovered labels to expandable section. #12824 +* [ENHANCEMENT] TSDB: Optimize WBL loading by not sending empty buffers over channel. #12808 +* [ENHANCEMENT] TSDB: Replay WBL mmap markers concurrently. #12801 +* [ENHANCEMENT] Promtool: Add support for specifying series matchers in the TSDB analyze command. #12842 +* [ENHANCEMENT] PromQL: Prevent Prometheus from overallocating memory on subqueries with a large number of steps. #12734 +* [ENHANCEMENT] PromQL: Add warning when monotonicity is forced in the input to histogram_quantile. #12931 +* [ENHANCEMENT] Scraping: Optimize sample appending by reducing garbage. #12939 +* [ENHANCEMENT] Storage: Reduce memory allocations in queries that merge series sets. #12938 +* [ENHANCEMENT] UI: Show group interval in rules display. #12943 +* [ENHANCEMENT] Scraping: Save memory when scraping by delaying creation of buffer. #12953 +* [ENHANCEMENT] Agent: Allow ingestion of out-of-order samples. #12897 +* [ENHANCEMENT] Promtool: Improve support for native histograms in TSDB analyze command. #12869 +* [BUGFIX] SD: Ensure that discovery managers are properly canceled. #10569 +* [BUGFIX] TSDB: Fix PostingsForMatchers race with creating new series. #12558 +* [BUGFIX] TSDB: Fix handling of explicit counter reset header in histograms. #12772 +* [BUGFIX] SD: Validate HTTP client configuration in HTTP, EC2, Azure, Uyuni, PuppetDB, and Lightsail SDs. #12762 #12811 #12812 #12815 #12814 #12816 +* [BUGFIX] TSDB: Fix counter reset edge cases causing native histogram panics. #12838 +* [BUGFIX] TSDB: Fix duplicate sample detection at chunk size limit. #12874 +* [BUGFIX] Promtool: Fix errors not being reported in check rules command. #12715 +* [BUGFIX] TSDB: Avoid panics reported in logs when head initialization takes a long time. #12876 +* [BUGFIX] TSDB: Ensure that WBL is repaired when possible.
#12406 + ## 2.47.1 / 2023-10-04 * [BUGFIX] Fix duplicate sample detection at chunk size limit #12874 diff --git a/MAINTAINERS.md b/MAINTAINERS.md index 1175bb9a6f..902e9a6e94 100644 --- a/MAINTAINERS.md +++ b/MAINTAINERS.md @@ -7,7 +7,7 @@ Julien Pivotto ( / @roidelapluie) and Levi Harrison * `discovery` * `k8s`: Frederic Branczyk ( / @brancz) * `documentation` - * `prometheus-mixin`: Björn Rabenstein ( / @beorn7) + * `prometheus-mixin`: Matthias Loibl ( / @metalmatze) * `storage` * `remote`: Chris Marchbanks ( / @csmarchbanks), Callum Styan ( / @cstyan), Bartłomiej Płotka ( / @bwplotka), Tom Wilkie ( / @tomwilkie) * `tsdb`: Ganesh Vernekar ( / @codesome), Bartłomiej Płotka ( / @bwplotka), Jesús Vázquez ( / @jesusvazquez) diff --git a/VERSION b/VERSION index 24c6ede7ad..f2bfe03dba 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.47.1 +2.48.0-rc.1 diff --git a/cmd/prometheus/main.go b/cmd/prometheus/main.go index 569eb56324..7bb6d9caf3 100644 --- a/cmd/prometheus/main.go +++ b/cmd/prometheus/main.go @@ -12,7 +12,6 @@ // limitations under the License. // The main package for the Prometheus server executable. -// nolint:revive // Many unsued function arguments in this file by design. package main import ( diff --git a/cmd/promtool/backfill_test.go b/cmd/promtool/backfill_test.go index b77dc7826d..f373ebd6e4 100644 --- a/cmd/promtool/backfill_test.go +++ b/cmd/promtool/backfill_test.go @@ -44,7 +44,7 @@ func sortSamples(samples []backfillSample) { }) } -func queryAllSeries(t testing.TB, q storage.Querier, expectedMinTime, expectedMaxTime int64) []backfillSample { // nolint:revive +func queryAllSeries(t testing.TB, q storage.Querier, expectedMinTime, expectedMaxTime int64) []backfillSample { ss := q.Select(context.Background(), false, nil, labels.MustNewMatcher(labels.MatchRegexp, "", ".*")) samples := []backfillSample{} for ss.Next() { diff --git a/cmd/promtool/main.go b/cmd/promtool/main.go index 4ff48ce25c..973795a86e 100644 --- a/cmd/promtool/main.go +++ b/cmd/promtool/main.go @@ -411,7 +411,6 @@ func checkExperimental(f bool) { } } -// nolint:revive var lintError = fmt.Errorf("lint error") type lintConfig struct { diff --git a/cmd/promtool/rules_test.go b/cmd/promtool/rules_test.go index bfea7c937d..1c06982880 100644 --- a/cmd/promtool/rules_test.go +++ b/cmd/promtool/rules_test.go @@ -35,7 +35,7 @@ type mockQueryRangeAPI struct { samples model.Matrix } -func (mockAPI mockQueryRangeAPI) QueryRange(_ context.Context, query string, r v1.Range, opts ...v1.Option) (model.Value, v1.Warnings, error) { // nolint:revive +func (mockAPI mockQueryRangeAPI) QueryRange(_ context.Context, query string, r v1.Range, opts ...v1.Option) (model.Value, v1.Warnings, error) { return mockAPI.samples, v1.Warnings{}, nil } diff --git a/config/config.go b/config/config.go index 0e1c0b782b..4c73f6c496 100644 --- a/config/config.go +++ b/config/config.go @@ -563,6 +563,8 @@ type ScrapeConfig struct { HonorLabels bool `yaml:"honor_labels,omitempty"` // Indicator whether the scraped timestamps should be respected. HonorTimestamps bool `yaml:"honor_timestamps"` + // Indicator whether to track the staleness of the scraped timestamps. + TrackTimestampsStaleness bool `yaml:"track_timestamps_staleness"` // A set of query parameters with which the target is scraped. Params url.Values `yaml:"params,omitempty"` // How frequently to scrape the targets of this scrape config. 
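The `config.ScrapeConfig` hunk above adds `track_timestamps_staleness` as a plain per-job YAML boolean next to `honor_timestamps`. As a rough illustration only, the sketch below shows how such a flag round-trips through `gopkg.in/yaml.v2`; the struct is a local stand-in for the relevant fields, not the actual Prometheus config type.

```go
package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

// scrapeConfigStub is a local stand-in for the timestamp-related knobs of
// config.ScrapeConfig; it is NOT the real Prometheus type.
type scrapeConfigStub struct {
	JobName                  string `yaml:"job_name"`
	HonorTimestamps          bool   `yaml:"honor_timestamps"`
	TrackTimestampsStaleness bool   `yaml:"track_timestamps_staleness"`
}

func main() {
	raw := `
job_name: pushgateway
honor_timestamps: true
track_timestamps_staleness: true
`
	var cfg scrapeConfigStub
	if err := yaml.Unmarshal([]byte(raw), &cfg); err != nil {
		panic(err)
	}
	// With the flag enabled, staleness markers are written even for samples
	// that carried an explicit timestamp (e.g. a Pushgateway-style target).
	fmt.Printf("%+v\n", cfg)
}
```

Since the field defaults to `false`, the pre-2.48 behaviour for explicitly timestamped samples is kept unless a scrape job opts in.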
diff --git a/discovery/kubernetes/endpoints.go b/discovery/kubernetes/endpoints.go index 7200d52dde..708e229a2f 100644 --- a/discovery/kubernetes/endpoints.go +++ b/discovery/kubernetes/endpoints.go @@ -11,7 +11,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -// nolint:revive // Many legitimately empty blocks in this file. package kubernetes import ( diff --git a/discovery/kubernetes/endpointslice.go b/discovery/kubernetes/endpointslice.go index e241c758bc..a168623804 100644 --- a/discovery/kubernetes/endpointslice.go +++ b/discovery/kubernetes/endpointslice.go @@ -15,6 +15,7 @@ package kubernetes import ( "context" + "errors" "fmt" "net" "strconv" @@ -183,14 +184,14 @@ func (e *EndpointSlice) Run(ctx context.Context, ch chan<- []*targetgroup.Group) cacheSyncs = append(cacheSyncs, e.nodeInf.HasSynced) } if !cache.WaitForCacheSync(ctx.Done(), cacheSyncs...) { - if ctx.Err() != context.Canceled { + if !errors.Is(ctx.Err(), context.Canceled) { level.Error(e.logger).Log("msg", "endpointslice informer unable to sync cache") } return } go func() { - for e.process(ctx, ch) { // nolint:revive + for e.process(ctx, ch) { } }() diff --git a/discovery/kubernetes/ingress.go b/discovery/kubernetes/ingress.go index 697b6f5198..fee4cc7207 100644 --- a/discovery/kubernetes/ingress.go +++ b/discovery/kubernetes/ingress.go @@ -88,7 +88,7 @@ func (i *Ingress) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { } go func() { - for i.process(ctx, ch) { // nolint:revive + for i.process(ctx, ch) { } }() diff --git a/discovery/kubernetes/node.go b/discovery/kubernetes/node.go index 6a20e7b1f2..5f9bcd8f48 100644 --- a/discovery/kubernetes/node.go +++ b/discovery/kubernetes/node.go @@ -96,7 +96,7 @@ func (n *Node) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { } go func() { - for n.process(ctx, ch) { // nolint:revive + for n.process(ctx, ch) { } }() diff --git a/discovery/kubernetes/pod.go b/discovery/kubernetes/pod.go index 74f74c1f75..88da7bba69 100644 --- a/discovery/kubernetes/pod.go +++ b/discovery/kubernetes/pod.go @@ -131,7 +131,7 @@ func (p *Pod) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { } go func() { - for p.process(ctx, ch) { // nolint:revive + for p.process(ctx, ch) { } }() diff --git a/discovery/kubernetes/service.go b/discovery/kubernetes/service.go index 7addf0054e..9fcc6644c3 100644 --- a/discovery/kubernetes/service.go +++ b/discovery/kubernetes/service.go @@ -91,7 +91,7 @@ func (s *Service) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { } go func() { - for s.process(ctx, ch) { // nolint:revive + for s.process(ctx, ch) { } }() diff --git a/discovery/zookeeper/zookeeper.go b/discovery/zookeeper/zookeeper.go index cadff5fd2e..308d63a5fc 100644 --- a/discovery/zookeeper/zookeeper.go +++ b/discovery/zookeeper/zookeeper.go @@ -193,7 +193,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { } for _, pathUpdate := range d.pathUpdates { // Drain event channel in case the treecache leaks goroutines otherwise. - for range pathUpdate { // nolint:revive + for range pathUpdate { } } d.conn.Close() diff --git a/docs/configuration/configuration.md b/docs/configuration/configuration.md index 4138271bc6..dc4ea12e75 100644 --- a/docs/configuration/configuration.md +++ b/docs/configuration/configuration.md @@ -222,6 +222,14 @@ job_name: # by the target will be ignored. 
[ honor_timestamps: <boolean> | default = true ] +# track_timestamps_staleness controls whether Prometheus tracks staleness of +# the metrics that have an explicit timestamp present in scraped data. +# +# If track_timestamps_staleness is set to "true", a staleness marker will be +# inserted in the TSDB when a metric is no longer present or the target +# is down. +[ track_timestamps_staleness: <boolean> | default = false ] + # Configures the protocol scheme used for requests. [ scheme: <scheme> | default = http ] diff --git a/docs/querying/api.md b/docs/querying/api.md index 408d32cdab..a268b1cc93 100644 --- a/docs/querying/api.md +++ b/docs/querying/api.md @@ -679,6 +679,7 @@ URL query parameters: - `rule_name[]=<string>`: only return rules with the given rule name. If the parameter is repeated, rules with any of the provided names are returned. If we've filtered out all the rules of a group, the group is not returned. When the parameter is absent or empty, no filtering is done. - `rule_group[]=<string>`: only return rules with the given rule group name. If the parameter is repeated, rules with any of the provided rule group names are returned. When the parameter is absent or empty, no filtering is done. - `file[]=<string>`: only return rules with the given filepath. If the parameter is repeated, rules with any of the provided filepaths are returned. When the parameter is absent or empty, no filtering is done. +- `exclude_alerts=<boolean>`: only return rules, do not return active alerts. ```json $ curl http://localhost:9090/api/v1/rules @@ -1307,4 +1308,4 @@ Enable the OTLP receiver by the feature flag `--enable-feature=otlp-write-receiver`. When enabled, the OTLP receiver endpoint is `/api/v1/otlp/v1/metrics`. -*New in v2.47* \ No newline at end of file +*New in v2.47* diff --git a/promql/engine.go b/promql/engine.go index 003d1a400c..88a04f60f0 100644 --- a/promql/engine.go +++ b/promql/engine.go @@ -1173,9 +1173,7 @@ func (ev *evaluator) rangeEval(prepSeries func(labels.Labels, *EvalSeriesHelper) bufHelpers[i] = make([]EvalSeriesHelper, len(matrixes[i])) for si, series := range matrixes[i] { - h := seriesHelpers[i][si] - prepSeries(series.Metric, &h) - seriesHelpers[i][si] = h + prepSeries(series.Metric, &seriesHelpers[i][si]) } } } @@ -2028,7 +2026,7 @@ func (ev *evaluator) matrixIterSlice( // (b) the number of samples is relatively small. // so a linear search will be as fast as a binary search. var drop int - for drop = 0; floats[drop].T < mint; drop++ { // nolint:revive + for drop = 0; floats[drop].T < mint; drop++ { } ev.currentSamples -= drop copy(floats, floats[drop:]) @@ -2050,7 +2048,7 @@ func (ev *evaluator) matrixIterSlice( // (b) the number of samples is relatively small. // so a linear search will be as fast as a binary search. var drop int - for drop = 0; histograms[drop].T < mint; drop++ { // nolint:revive + for drop = 0; histograms[drop].T < mint; drop++ { } copy(histograms, histograms[drop:]) histograms = histograms[:len(histograms)-drop] diff --git a/promql/engine_test.go b/promql/engine_test.go index d628bd128f..731915d243 100644 --- a/promql/engine_test.go +++ b/promql/engine_test.go @@ -1657,7 +1657,6 @@ func TestRecoverEvaluatorRuntime(t *testing.T) { // Cause a runtime panic. var a []int - //nolint:govet a[123] = 1 } diff --git a/promql/functions.go b/promql/functions.go index 5fd54f6c76..46a0625ca1 100644 --- a/promql/functions.go +++ b/promql/functions.go @@ -11,7 +11,6 @@ // See the License for the specific language governing permissions and // limitations under the License.
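The `rangeEval` hunk above drops the copy-out/write-back sequence in favour of handing `prepSeries` a pointer straight into the backing array. The toy program below is not Prometheus code; with a made-up `helper` type it only illustrates why the two variants leave the same state while the second avoids copying the struct twice per series.

```go
package main

import "fmt"

// helper is a made-up stand-in for promql.EvalSeriesHelper.
type helper struct{ groupingKey string }

// prep mutates the helper it receives a pointer to, like prepSeries does.
func prep(metric string, h *helper) { h.groupingKey = "by(" + metric + ")" }

func main() {
	helpers := make([]helper, 2)

	// Old pattern: copy the element out, mutate the copy, write it back.
	h := helpers[0]
	prep("up", &h)
	helpers[0] = h

	// New pattern: mutate the slice element in place through its address.
	prep("up", &helpers[1])

	// Both variants end up with the same state, but the second one never
	// copies the struct.
	fmt.Println(helpers[0] == helpers[1]) // true
}
```

Running the program prints `true`, confirming both helpers end up identical.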
-// nolint:revive // Many unsued function arguments in this file by design. package promql import ( diff --git a/promql/parser/ast.go b/promql/parser/ast.go index 58136266fd..379352599d 100644 --- a/promql/parser/ast.go +++ b/promql/parser/ast.go @@ -55,7 +55,6 @@ type Statement interface { Node // PromQLStmt ensures that no other type accidentally implements the interface - // nolint:unused PromQLStmt() } diff --git a/promql/parser/lex.go b/promql/parser/lex.go index c8bfcc2e1e..2ec3abd83e 100644 --- a/promql/parser/lex.go +++ b/promql/parser/lex.go @@ -11,7 +11,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -// nolint:revive // Many legitimately empty blocks in this file. package parser import ( diff --git a/promql/parser/parse.go b/promql/parser/parse.go index 34217697a6..c4d6c89287 100644 --- a/promql/parser/parse.go +++ b/promql/parser/parse.go @@ -72,7 +72,6 @@ func WithFunctions(functions map[string]*Function) Opt { } // NewParser returns a new parser. -// nolint:revive func NewParser(input string, opts ...Opt) *parser { p := parserPool.Get().(*parser) @@ -660,9 +659,9 @@ func (p *parser) checkAST(node Node) (typ ValueType) { // This is made a function instead of a variable, so it is lazily evaluated on demand. opRange := func() (r posrange.PositionRange) { // Remove whitespace at the beginning and end of the range. - for r.Start = n.LHS.PositionRange().End; isSpace(rune(p.lex.input[r.Start])); r.Start++ { // nolint:revive + for r.Start = n.LHS.PositionRange().End; isSpace(rune(p.lex.input[r.Start])); r.Start++ { } - for r.End = n.RHS.PositionRange().Start - 1; isSpace(rune(p.lex.input[r.End])); r.End-- { // nolint:revive + for r.End = n.RHS.PositionRange().Start - 1; isSpace(rune(p.lex.input[r.End])); r.End-- { } return } diff --git a/promql/parser/parse_test.go b/promql/parser/parse_test.go index 5624a67613..3f90cc3c19 100644 --- a/promql/parser/parse_test.go +++ b/promql/parser/parse_test.go @@ -4061,7 +4061,6 @@ func TestRecoverParserRuntime(t *testing.T) { defer p.recover(&err) // Cause a runtime panic. 
var a []int - //nolint:govet a[123] = 1 } diff --git a/scrape/scrape.go b/scrape/scrape.go index d67297ca7e..10214e5494 100644 --- a/scrape/scrape.go +++ b/scrape/scrape.go @@ -95,18 +95,19 @@ type labelLimits struct { } type scrapeLoopOptions struct { - target *Target - scraper scraper - sampleLimit int - bucketLimit int - labelLimits *labelLimits - honorLabels bool - honorTimestamps bool - interval time.Duration - timeout time.Duration - scrapeClassicHistograms bool - mrc []*relabel.Config - cache *scrapeCache + target *Target + scraper scraper + sampleLimit int + bucketLimit int + labelLimits *labelLimits + honorLabels bool + honorTimestamps bool + trackTimestampsStaleness bool + interval time.Duration + timeout time.Duration + scrapeClassicHistograms bool + mrc []*relabel.Config + cache *scrapeCache } const maxAheadTime = 10 * time.Minute @@ -160,6 +161,7 @@ func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, offsetSeed cache, offsetSeed, opts.honorTimestamps, + opts.trackTimestampsStaleness, opts.sampleLimit, opts.bucketLimit, opts.labelLimits, @@ -270,9 +272,10 @@ func (sp *scrapePool) reload(cfg *config.ScrapeConfig) error { labelNameLengthLimit: int(sp.config.LabelNameLengthLimit), labelValueLengthLimit: int(sp.config.LabelValueLengthLimit), } - honorLabels = sp.config.HonorLabels - honorTimestamps = sp.config.HonorTimestamps - mrc = sp.config.MetricRelabelConfigs + honorLabels = sp.config.HonorLabels + honorTimestamps = sp.config.HonorTimestamps + trackTimestampsStaleness = sp.config.TrackTimestampsStaleness + mrc = sp.config.MetricRelabelConfigs ) sp.targetMtx.Lock() @@ -298,17 +301,18 @@ func (sp *scrapePool) reload(cfg *config.ScrapeConfig) error { acceptHeader: acceptHeader(cfg.ScrapeProtocols), } newLoop = sp.newLoop(scrapeLoopOptions{ - target: t, - scraper: s, - sampleLimit: sampleLimit, - bucketLimit: bucketLimit, - labelLimits: labelLimits, - honorLabels: honorLabels, - honorTimestamps: honorTimestamps, - mrc: mrc, - cache: cache, - interval: interval, - timeout: timeout, + target: t, + scraper: s, + sampleLimit: sampleLimit, + bucketLimit: bucketLimit, + labelLimits: labelLimits, + honorLabels: honorLabels, + honorTimestamps: honorTimestamps, + trackTimestampsStaleness: trackTimestampsStaleness, + mrc: mrc, + cache: cache, + interval: interval, + timeout: timeout, }) ) if err != nil { @@ -396,10 +400,11 @@ func (sp *scrapePool) sync(targets []*Target) { labelNameLengthLimit: int(sp.config.LabelNameLengthLimit), labelValueLengthLimit: int(sp.config.LabelValueLengthLimit), } - honorLabels = sp.config.HonorLabels - honorTimestamps = sp.config.HonorTimestamps - mrc = sp.config.MetricRelabelConfigs - scrapeClassicHistograms = sp.config.ScrapeClassicHistograms + honorLabels = sp.config.HonorLabels + honorTimestamps = sp.config.HonorTimestamps + trackTimestampsStaleness = sp.config.TrackTimestampsStaleness + mrc = sp.config.MetricRelabelConfigs + scrapeClassicHistograms = sp.config.ScrapeClassicHistograms ) sp.targetMtx.Lock() @@ -421,17 +426,18 @@ func (sp *scrapePool) sync(targets []*Target) { metrics: sp.metrics, } l := sp.newLoop(scrapeLoopOptions{ - target: t, - scraper: s, - sampleLimit: sampleLimit, - bucketLimit: bucketLimit, - labelLimits: labelLimits, - honorLabels: honorLabels, - honorTimestamps: honorTimestamps, - mrc: mrc, - interval: interval, - timeout: timeout, - scrapeClassicHistograms: scrapeClassicHistograms, + target: t, + scraper: s, + sampleLimit: sampleLimit, + bucketLimit: bucketLimit, + labelLimits: labelLimits, + honorLabels: 
honorLabels, + honorTimestamps: honorTimestamps, + trackTimestampsStaleness: trackTimestampsStaleness, + mrc: mrc, + interval: interval, + timeout: timeout, + scrapeClassicHistograms: scrapeClassicHistograms, }) if err != nil { l.setForcedError(err) @@ -750,21 +756,22 @@ type cacheEntry struct { } type scrapeLoop struct { - scraper scraper - l log.Logger - cache *scrapeCache - lastScrapeSize int - buffers *pool.Pool - offsetSeed uint64 - honorTimestamps bool - forcedErr error - forcedErrMtx sync.Mutex - sampleLimit int - bucketLimit int - labelLimits *labelLimits - interval time.Duration - timeout time.Duration - scrapeClassicHistograms bool + scraper scraper + l log.Logger + cache *scrapeCache + lastScrapeSize int + buffers *pool.Pool + offsetSeed uint64 + honorTimestamps bool + trackTimestampsStaleness bool + forcedErr error + forcedErrMtx sync.Mutex + sampleLimit int + bucketLimit int + labelLimits *labelLimits + interval time.Duration + timeout time.Duration + scrapeClassicHistograms bool appender func(ctx context.Context) storage.Appender sampleMutator labelsMutator @@ -1046,6 +1053,7 @@ func newScrapeLoop(ctx context.Context, cache *scrapeCache, offsetSeed uint64, honorTimestamps bool, + trackTimestampsStaleness bool, sampleLimit int, bucketLimit int, labelLimits *labelLimits, @@ -1080,27 +1088,28 @@ func newScrapeLoop(ctx context.Context, } sl := &scrapeLoop{ - scraper: sc, - buffers: buffers, - cache: cache, - appender: appender, - sampleMutator: sampleMutator, - reportSampleMutator: reportSampleMutator, - stopped: make(chan struct{}), - offsetSeed: offsetSeed, - l: l, - parentCtx: ctx, - appenderCtx: appenderCtx, - honorTimestamps: honorTimestamps, - sampleLimit: sampleLimit, - bucketLimit: bucketLimit, - labelLimits: labelLimits, - interval: interval, - timeout: timeout, - scrapeClassicHistograms: scrapeClassicHistograms, - reportExtraMetrics: reportExtraMetrics, - appendMetadataToWAL: appendMetadataToWAL, - metrics: metrics, + scraper: sc, + buffers: buffers, + cache: cache, + appender: appender, + sampleMutator: sampleMutator, + reportSampleMutator: reportSampleMutator, + stopped: make(chan struct{}), + offsetSeed: offsetSeed, + l: l, + parentCtx: ctx, + appenderCtx: appenderCtx, + honorTimestamps: honorTimestamps, + trackTimestampsStaleness: trackTimestampsStaleness, + sampleLimit: sampleLimit, + bucketLimit: bucketLimit, + labelLimits: labelLimits, + interval: interval, + timeout: timeout, + scrapeClassicHistograms: scrapeClassicHistograms, + reportExtraMetrics: reportExtraMetrics, + appendMetadataToWAL: appendMetadataToWAL, + metrics: metrics, } sl.ctx, sl.cancel = context.WithCancel(ctx) @@ -1547,7 +1556,7 @@ loop: } if !ok { - if parsedTimestamp == nil { + if parsedTimestamp == nil || sl.trackTimestampsStaleness { // Bypass staleness logic if there is an explicit timestamp. 
sl.cache.trackStaleness(hash, lset) } @@ -1628,7 +1637,7 @@ loop: func (sl *scrapeLoop) checkAddError(ce *cacheEntry, met []byte, tp *int64, err error, sampleLimitErr, bucketLimitErr *error, appErrs *appendErrors) (bool, error) { switch errors.Cause(err) { case nil: - if tp == nil && ce != nil { + if (tp == nil || sl.trackTimestampsStaleness) && ce != nil { sl.cache.trackStaleness(ce.hash, ce.lset) } return true, nil diff --git a/scrape/scrape_test.go b/scrape/scrape_test.go index 672f466146..7be3f34610 100644 --- a/scrape/scrape_test.go +++ b/scrape/scrape_test.go @@ -650,6 +650,7 @@ func TestScrapeLoopStopBeforeRun(t *testing.T) { nopMutator, nil, nil, 0, true, + false, 0, 0, nil, 1, @@ -724,6 +725,7 @@ func TestScrapeLoopStop(t *testing.T) { nil, 0, true, + false, 0, 0, nil, 10*time.Millisecond, @@ -802,6 +804,7 @@ func TestScrapeLoopRun(t *testing.T) { nil, 0, true, + false, 0, 0, nil, time.Second, @@ -859,6 +862,7 @@ func TestScrapeLoopRun(t *testing.T) { nil, 0, true, + false, 0, 0, nil, time.Second, @@ -920,6 +924,7 @@ func TestScrapeLoopForcedErr(t *testing.T) { nil, 0, true, + false, 0, 0, nil, time.Second, @@ -980,6 +985,7 @@ func TestScrapeLoopMetadata(t *testing.T) { cache, 0, true, + false, 0, 0, nil, 0, @@ -1039,6 +1045,7 @@ func simpleTestScrapeLoop(t testing.TB) (context.Context, *scrapeLoop) { nil, 0, true, + false, 0, 0, nil, 0, @@ -1101,6 +1108,7 @@ func TestScrapeLoopFailWithInvalidLabelsAfterRelabel(t *testing.T) { nil, 0, true, + false, 0, 0, nil, 0, @@ -1181,6 +1189,7 @@ func TestScrapeLoopRunCreatesStaleMarkersOnFailedScrape(t *testing.T) { nil, 0, true, + false, 0, 0, nil, 10*time.Millisecond, @@ -1246,6 +1255,7 @@ func TestScrapeLoopRunCreatesStaleMarkersOnParseFailure(t *testing.T) { nil, 0, true, + false, 0, 0, nil, 10*time.Millisecond, @@ -1314,6 +1324,7 @@ func TestScrapeLoopCache(t *testing.T) { nil, 0, true, + false, 0, 0, nil, 10*time.Millisecond, @@ -1399,6 +1410,7 @@ func TestScrapeLoopCacheMemoryExhaustionProtection(t *testing.T) { nil, 0, true, + false, 0, 0, nil, 10*time.Millisecond, @@ -1515,6 +1527,7 @@ func TestScrapeLoopAppend(t *testing.T) { nil, 0, true, + false, 0, 0, nil, 0, @@ -1613,7 +1626,7 @@ func TestScrapeLoopAppendForConflictingPrefixedLabels(t *testing.T) { }, nil, func(ctx context.Context) storage.Appender { return app }, - nil, 0, true, 0, 0, nil, 0, 0, false, false, false, nil, false, newTestScrapeMetrics(t), + nil, 0, true, false, 0, 0, nil, 0, 0, false, false, false, nil, false, newTestScrapeMetrics(t), ) slApp := sl.appender(context.Background()) _, _, _, err := sl.append(slApp, []byte(tc.exposedLabels), "", time.Date(2000, 1, 1, 1, 0, 0, 0, time.UTC)) @@ -1644,6 +1657,7 @@ func TestScrapeLoopAppendCacheEntryButErrNotFound(t *testing.T) { nil, 0, true, + false, 0, 0, nil, 0, @@ -1704,6 +1718,7 @@ func TestScrapeLoopAppendSampleLimit(t *testing.T) { nil, 0, true, + false, app.limit, 0, nil, 0, @@ -1783,6 +1798,7 @@ func TestScrapeLoop_HistogramBucketLimit(t *testing.T) { nil, 0, true, + false, app.limit, 0, nil, 0, @@ -1883,6 +1899,7 @@ func TestScrapeLoop_ChangingMetricString(t *testing.T) { nil, 0, true, + false, 0, 0, nil, 0, @@ -1933,6 +1950,7 @@ func TestScrapeLoopAppendStaleness(t *testing.T) { nil, 0, true, + false, 0, 0, nil, 0, @@ -1986,6 +2004,7 @@ func TestScrapeLoopAppendNoStalenessIfTimestamp(t *testing.T) { nil, 0, true, + false, 0, 0, nil, 0, @@ -2313,6 +2332,7 @@ metric: < nil, 0, true, + false, 0, 0, nil, 0, @@ -2402,6 +2422,7 @@ func TestScrapeLoopAppendExemplarSeries(t *testing.T) { nil, 0, true, + false, 0, 0, 
nil, 0, @@ -2456,6 +2477,7 @@ func TestScrapeLoopRunReportsTargetDownOnScrapeError(t *testing.T) { nil, 0, true, + false, 0, 0, nil, 10*time.Millisecond, @@ -2494,6 +2516,7 @@ func TestScrapeLoopRunReportsTargetDownOnInvalidUTF8(t *testing.T) { nil, 0, true, + false, 0, 0, nil, 10*time.Millisecond, @@ -2545,6 +2568,7 @@ func TestScrapeLoopAppendGracefullyIfAmendOrOutOfOrderOrOutOfBounds(t *testing.T nil, 0, true, + false, 0, 0, nil, 0, @@ -2592,6 +2616,7 @@ func TestScrapeLoopOutOfBoundsTimeError(t *testing.T) { nil, 0, true, + false, 0, 0, nil, 0, @@ -2883,6 +2908,7 @@ func TestScrapeLoop_RespectTimestamps(t *testing.T) { func(ctx context.Context) storage.Appender { return capp }, nil, 0, true, + false, 0, 0, nil, 0, @@ -2926,6 +2952,7 @@ func TestScrapeLoop_DiscardTimestamps(t *testing.T) { func(ctx context.Context) storage.Appender { return capp }, nil, 0, false, + false, 0, 0, nil, 0, @@ -2968,6 +2995,7 @@ func TestScrapeLoopDiscardDuplicateLabels(t *testing.T) { nil, 0, true, + false, 0, 0, nil, 0, @@ -3028,6 +3056,7 @@ func TestScrapeLoopDiscardUnnamedMetrics(t *testing.T) { nil, 0, true, + false, 0, 0, nil, 0, @@ -3293,6 +3322,7 @@ func TestScrapeAddFast(t *testing.T) { nil, 0, true, + false, 0, 0, nil, 0, @@ -3381,6 +3411,7 @@ func TestScrapeReportSingleAppender(t *testing.T) { nil, 0, true, + false, 0, 0, nil, 10*time.Millisecond, @@ -3585,6 +3616,7 @@ func TestScrapeLoopLabelLimit(t *testing.T) { nil, 0, true, + false, 0, 0, &test.labelLimits, 0, @@ -3646,3 +3678,68 @@ func TestTargetScrapeIntervalAndTimeoutRelabel(t *testing.T) { require.Equal(t, "3s", sp.ActiveTargets()[0].labels.Get(model.ScrapeIntervalLabel)) require.Equal(t, "750ms", sp.ActiveTargets()[0].labels.Get(model.ScrapeTimeoutLabel)) } + +func TestScrapeLoopRunCreatesStaleMarkersOnFailedScrapeForTimestampedMetrics(t *testing.T) { + appender := &collectResultAppender{} + var ( + signal = make(chan struct{}, 1) + scraper = &testScraper{} + app = func(ctx context.Context) storage.Appender { return appender } + ) + + ctx, cancel := context.WithCancel(context.Background()) + sl := newScrapeLoop(ctx, + scraper, + nil, nil, + nopMutator, + nopMutator, + app, + nil, + 0, + true, + true, + 0, 0, + nil, + 10*time.Millisecond, + time.Hour, + false, + false, + false, + nil, + false, + newTestScrapeMetrics(t), + ) + // Succeed once, several failures, then stop. + numScrapes := 0 + + scraper.scrapeFunc = func(ctx context.Context, w io.Writer) error { + numScrapes++ + + switch numScrapes { + case 1: + w.Write([]byte(fmt.Sprintf("metric_a 42 %d\n", time.Now().UnixNano()/int64(time.Millisecond)))) + return nil + case 5: + cancel() + } + return errors.New("scrape failed") + } + + go func() { + sl.run(nil) + signal <- struct{}{} + }() + + select { + case <-signal: + case <-time.After(5 * time.Second): + t.Fatalf("Scrape wasn't stopped.") + } + + // 1 successfully scraped sample, 1 stale marker after first fail, 5 report samples for + // each scrape successful or not. + require.Equal(t, 27, len(appender.resultFloats), "Appended samples not as expected:\n%s", appender) + require.Equal(t, 42.0, appender.resultFloats[0].f, "Appended first sample not as expected") + require.True(t, value.IsStaleNaN(appender.resultFloats[6].f), + "Appended second sample not as expected. 
Wanted: stale NaN Got: %x", math.Float64bits(appender.resultFloats[6].f)) +} diff --git a/scrape/target.go b/scrape/target.go index 8b745a9c49..227687372d 100644 --- a/scrape/target.go +++ b/scrape/target.go @@ -145,9 +145,7 @@ func (t *Target) SetMetadataStore(s MetricMetadataStore) { func (t *Target) hash() uint64 { h := fnv.New64a() - //nolint: errcheck h.Write([]byte(fmt.Sprintf("%016d", t.labels.Hash()))) - //nolint: errcheck h.Write([]byte(t.URL().String())) return h.Sum64() diff --git a/storage/buffer.go b/storage/buffer.go index a692570ede..d2d89e0425 100644 --- a/storage/buffer.go +++ b/storage/buffer.go @@ -284,7 +284,8 @@ func newSampleRing(delta int64, size int, typ chunkenc.ValueType) *sampleRing { case chunkenc.ValFloatHistogram: r.fhBuf = make([]fhSample, size) default: - r.iBuf = make([]chunks.Sample, size) + // Do not initialize anything because the 1st sample will be + // added to one of the other bufs anyway. } return r } @@ -294,6 +295,12 @@ func (r *sampleRing) reset() { r.i = -1 r.f = 0 r.bufInUse = noBuf + + // The first sample after the reset will always go to a specialized + // buffer. If we later need to change to the interface buffer, we'll + // copy from the specialized buffer to the interface buffer. For that to + // work properly, we have to reset the interface buffer here, too. + r.iBuf = r.iBuf[:0] } // Returns the current iterator. Invalidates previously returned iterators. @@ -441,6 +448,7 @@ func (r *sampleRing) add(s chunks.Sample) { } // The new sample isn't a fit for the already existing // ones. Copy the latter into the interface buffer where needed. + // The interface buffer is assumed to be of length zero at this point. switch r.bufInUse { case fBuf: for _, s := range r.fBuf { diff --git a/storage/buffer_test.go b/storage/buffer_test.go index bc79a79ba8..c2542f3d9c 100644 --- a/storage/buffer_test.go +++ b/storage/buffer_test.go @@ -90,6 +90,54 @@ func TestSampleRing(t *testing.T) { } } +func TestSampleRingMixed(t *testing.T) { + h1 := tsdbutil.GenerateTestHistogram(1) + h2 := tsdbutil.GenerateTestHistogram(2) + + // With ValNone as the preferred type, nothing should be initialized. + r := newSampleRing(10, 2, chunkenc.ValNone) + require.Zero(t, len(r.fBuf)) + require.Zero(t, len(r.hBuf)) + require.Zero(t, len(r.fhBuf)) + require.Zero(t, len(r.iBuf)) + + // But then mixed adds should work as expected. 
+ r.addF(fSample{t: 1, f: 3.14}) + r.addH(hSample{t: 2, h: h1}) + + it := r.iterator() + + require.Equal(t, chunkenc.ValFloat, it.Next()) + ts, f := it.At() + require.Equal(t, int64(1), ts) + require.Equal(t, 3.14, f) + require.Equal(t, chunkenc.ValHistogram, it.Next()) + var h *histogram.Histogram + ts, h = it.AtHistogram() + require.Equal(t, int64(2), ts) + require.Equal(t, h1, h) + require.Equal(t, chunkenc.ValNone, it.Next()) + + r.reset() + it = r.iterator() + require.Equal(t, chunkenc.ValNone, it.Next()) + + r.addF(fSample{t: 3, f: 4.2}) + r.addH(hSample{t: 4, h: h2}) + + it = r.iterator() + + require.Equal(t, chunkenc.ValFloat, it.Next()) + ts, f = it.At() + require.Equal(t, int64(3), ts) + require.Equal(t, 4.2, f) + require.Equal(t, chunkenc.ValHistogram, it.Next()) + ts, h = it.AtHistogram() + require.Equal(t, int64(4), ts) + require.Equal(t, h2, h) + require.Equal(t, chunkenc.ValNone, it.Next()) +} + func TestBufferedSeriesIterator(t *testing.T) { var it *BufferedSeriesIterator @@ -211,7 +259,7 @@ func BenchmarkBufferedSeriesIterator(b *testing.B) { b.ReportAllocs() b.ResetTimer() - for it.Next() != chunkenc.ValNone { // nolint:revive + for it.Next() != chunkenc.ValNone { // Scan everything. } require.NoError(b, it.Err()) diff --git a/storage/memoized_iterator_test.go b/storage/memoized_iterator_test.go index 1c87119283..4791113800 100644 --- a/storage/memoized_iterator_test.go +++ b/storage/memoized_iterator_test.go @@ -112,7 +112,7 @@ func BenchmarkMemoizedSeriesIterator(b *testing.B) { b.ReportAllocs() b.ResetTimer() - for it.Next() != chunkenc.ValNone { // nolint:revive + for it.Next() != chunkenc.ValNone { // Scan everything. } require.NoError(b, it.Err()) diff --git a/storage/merge.go b/storage/merge.go index ad9f67d12c..f4d34b3584 100644 --- a/storage/merge.go +++ b/storage/merge.go @@ -502,9 +502,9 @@ func (c *chainSampleIterator) Seek(t int64) chunkenc.ValueType { // If any iterator is reporting an error, abort. return chunkenc.ValNone } - } else { - heap.Push(&c.h, iter) + continue } + heap.Push(&c.h, iter) } if len(c.h) > 0 { c.curr = heap.Pop(&c.h).(chunkenc.Iterator) @@ -599,7 +599,9 @@ func (c *chainSampleIterator) Next() chunkenc.ValueType { if c.curr.Err() != nil { // Abort if we've hit an error. return chunkenc.ValNone - } else if len(c.h) == 0 { + } + + if len(c.h) == 0 { // No iterator left to iterate. c.curr = nil return chunkenc.ValNone diff --git a/storage/remote/azuread/azuread.go b/storage/remote/azuread/azuread.go index cb4587b02a..d8a1bf7942 100644 --- a/storage/remote/azuread/azuread.go +++ b/storage/remote/azuread/azuread.go @@ -62,7 +62,7 @@ type OAuthConfig struct { } // AzureADConfig is used to store the config values. -type AzureADConfig struct { // nolint:revive +type AzureADConfig struct { // ManagedIdentity is the managed identity that is being used to authenticate. 
ManagedIdentity *ManagedIdentityConfig `yaml:"managed_identity,omitempty"` diff --git a/storage/remote/client_test.go b/storage/remote/client_test.go index 9217e1c7e2..33ae7e4686 100644 --- a/storage/remote/client_test.go +++ b/storage/remote/client_test.go @@ -136,7 +136,7 @@ func TestClientRetryAfter(t *testing.T) { err = c.Store(context.Background(), []byte{}, 0) require.Equal(t, tc.expectedRecoverable, errors.As(err, &recErr), "Mismatch in expected recoverable error status.") if tc.expectedRecoverable { - require.Equal(t, tc.expectedRetryAfter, err.(RecoverableError).retryAfter) + require.Equal(t, tc.expectedRetryAfter, recErr.retryAfter) } }) } diff --git a/storage/remote/read_handler.go b/storage/remote/read_handler.go index e2702c9f77..3a99e3360c 100644 --- a/storage/remote/read_handler.go +++ b/storage/remote/read_handler.go @@ -15,6 +15,7 @@ package remote import ( "context" + "errors" "net/http" "strings" "sync" @@ -169,7 +170,8 @@ func (h *readHandler) remoteReadSamples( } return nil }(); err != nil { - if httpErr, ok := err.(HTTPError); ok { + var httpErr HTTPError + if errors.As(err, &httpErr) { http.Error(w, httpErr.Error(), httpErr.Status()) return } @@ -241,7 +243,8 @@ func (h *readHandler) remoteReadStreamedXORChunks(ctx context.Context, w http.Re } return nil }(); err != nil { - if httpErr, ok := err.(HTTPError); ok { + var httpErr HTTPError + if errors.As(err, &httpErr) { http.Error(w, httpErr.Error(), httpErr.Status()) return } diff --git a/storage/remote/write_handler.go b/storage/remote/write_handler.go index 6c0cd8a29b..a0dd3940e2 100644 --- a/storage/remote/write_handler.go +++ b/storage/remote/write_handler.go @@ -66,9 +66,9 @@ func (h *writeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { } err = h.write(r.Context(), req) - switch err { - case nil: - case storage.ErrOutOfOrderSample, storage.ErrOutOfBounds, storage.ErrDuplicateSampleForTimestamp: + switch { + case err == nil: + case errors.Is(err, storage.ErrOutOfOrderSample), errors.Is(err, storage.ErrOutOfBounds), errors.Is(err, storage.ErrDuplicateSampleForTimestamp): // Indicated an out of order sample is a bad request to prevent retries. http.Error(w, err.Error(), http.StatusBadRequest) return @@ -222,9 +222,9 @@ func (h *otlpWriteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { Timeseries: prwMetrics, }) - switch err { - case nil: - case storage.ErrOutOfOrderSample, storage.ErrOutOfBounds, storage.ErrDuplicateSampleForTimestamp: + switch { + case err == nil: + case errors.Is(err, storage.ErrOutOfOrderSample), errors.Is(err, storage.ErrOutOfBounds), errors.Is(err, storage.ErrDuplicateSampleForTimestamp): // Indicated an out of order sample is a bad request to prevent retries. 
http.Error(w, err.Error(), http.StatusBadRequest) return diff --git a/template/template.go b/template/template.go index 01f6ec9a8a..5d72a7e83e 100644 --- a/template/template.go +++ b/template/template.go @@ -181,7 +181,7 @@ func NewTemplateExpander( return html_template.HTML(text) }, "match": regexp.MatchString, - "title": strings.Title, // nolint:staticcheck + "title": strings.Title, //nolint:staticcheck "toUpper": strings.ToUpper, "toLower": strings.ToLower, "graphLink": strutil.GraphLinkForExpression, diff --git a/tsdb/chunkenc/float_histogram.go b/tsdb/chunkenc/float_histogram.go index 505d112455..dd35b9cae4 100644 --- a/tsdb/chunkenc/float_histogram.go +++ b/tsdb/chunkenc/float_histogram.go @@ -103,7 +103,7 @@ func (c *FloatHistogramChunk) Appender() (Appender, error) { // To get an appender, we must know the state it would have if we had // appended all existing data from scratch. We iterate through the end // and populate via the iterator's state. - for it.Next() == ValFloatHistogram { // nolint:revive + for it.Next() == ValFloatHistogram { } if err := it.Err(); err != nil { return nil, err diff --git a/tsdb/chunkenc/float_histogram_test.go b/tsdb/chunkenc/float_histogram_test.go index 7629ae4df8..a54125fdcb 100644 --- a/tsdb/chunkenc/float_histogram_test.go +++ b/tsdb/chunkenc/float_histogram_test.go @@ -157,7 +157,7 @@ func TestFloatHistogramChunkSameBuckets(t *testing.T) { // 3. Now recycle an iterator that was never used to access anything. itX := c.Iterator(nil) - for itX.Next() == ValFloatHistogram { // nolint:revive + for itX.Next() == ValFloatHistogram { // Just iterate through without accessing anything. } it3 := c.iterator(itX) diff --git a/tsdb/chunkenc/histogram.go b/tsdb/chunkenc/histogram.go index 847d893761..7c6f07f1f2 100644 --- a/tsdb/chunkenc/histogram.go +++ b/tsdb/chunkenc/histogram.go @@ -114,7 +114,7 @@ func (c *HistogramChunk) Appender() (Appender, error) { // To get an appender, we must know the state it would have if we had // appended all existing data from scratch. We iterate through the end // and populate via the iterator's state. - for it.Next() == ValHistogram { // nolint:revive + for it.Next() == ValHistogram { } if err := it.Err(); err != nil { return nil, err diff --git a/tsdb/chunkenc/histogram_test.go b/tsdb/chunkenc/histogram_test.go index b983d7fe6b..25e415da76 100644 --- a/tsdb/chunkenc/histogram_test.go +++ b/tsdb/chunkenc/histogram_test.go @@ -162,7 +162,7 @@ func TestHistogramChunkSameBuckets(t *testing.T) { // 3. Now recycle an iterator that was never used to access anything. itX := c.Iterator(nil) - for itX.Next() == ValHistogram { // nolint:revive + for itX.Next() == ValHistogram { // Just iterate through without accessing anything. } it3 := c.iterator(itX) diff --git a/tsdb/chunkenc/xor.go b/tsdb/chunkenc/xor.go index 089a51a644..d54e5dbab1 100644 --- a/tsdb/chunkenc/xor.go +++ b/tsdb/chunkenc/xor.go @@ -99,7 +99,7 @@ func (c *XORChunk) Appender() (Appender, error) { // To get an appender we must know the state it would have if we had // appended all existing data from scratch. // We iterate through the end and populate via the iterator's state. 
- for it.Next() != ValNone { // nolint:revive + for it.Next() != ValNone { } if err := it.Err(); err != nil { return nil, err diff --git a/tsdb/db_test.go b/tsdb/db_test.go index 85427a6e95..209c3a26a6 100644 --- a/tsdb/db_test.go +++ b/tsdb/db_test.go @@ -1953,7 +1953,8 @@ func TestQuerierWithBoundaryChunks(t *testing.T) { // The requested interval covers 2 blocks, so the querier's label values for blockID should give us 2 values, one from each block. b, ws, err := q.LabelValues(ctx, "blockID") require.NoError(t, err) - require.Equal(t, annotations.Annotations{}, ws) + var nilAnnotations annotations.Annotations + require.Equal(t, nilAnnotations, ws) require.Equal(t, []string{"1", "2"}, b) } diff --git a/tsdb/errors/errors.go b/tsdb/errors/errors.go index aa0a4b1b36..21449e8950 100644 --- a/tsdb/errors/errors.go +++ b/tsdb/errors/errors.go @@ -25,7 +25,7 @@ import ( type multiError []error // NewMulti returns multiError with provided errors added if not nil. -func NewMulti(errs ...error) multiError { // nolint:revive +func NewMulti(errs ...error) multiError { m := multiError{} m.Add(errs...) return m diff --git a/tsdb/head_test.go b/tsdb/head_test.go index 403ab99d29..5964c7667b 100644 --- a/tsdb/head_test.go +++ b/tsdb/head_test.go @@ -11,7 +11,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -// nolint:revive // Many legitimately empty blocks in this file. package tsdb import ( diff --git a/tsdb/head_wal.go b/tsdb/head_wal.go index a7894410ba..04fb4ba45a 100644 --- a/tsdb/head_wal.go +++ b/tsdb/head_wal.go @@ -11,7 +11,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -// nolint:revive // Many legitimately empty blocks in this file. package tsdb import ( diff --git a/tsdb/ooo_head_read.go b/tsdb/ooo_head_read.go index 7b24d8c20a..b7cf0a9595 100644 --- a/tsdb/ooo_head_read.go +++ b/tsdb/ooo_head_read.go @@ -11,7 +11,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -// nolint:revive // Many unused function arguments in this file by design. package tsdb import ( diff --git a/tsdb/querier_bench_test.go b/tsdb/querier_bench_test.go index e748189e38..501879d01b 100644 --- a/tsdb/querier_bench_test.go +++ b/tsdb/querier_bench_test.go @@ -287,7 +287,7 @@ func BenchmarkQuerierSelect(b *testing.B) { } ss := q.Select(context.Background(), sorted, hints, matcher) - for ss.Next() { // nolint:revive + for ss.Next() { } require.NoError(b, ss.Err()) } diff --git a/tsdb/querier_test.go b/tsdb/querier_test.go index 9916febef2..053fc45730 100644 --- a/tsdb/querier_test.go +++ b/tsdb/querier_test.go @@ -11,7 +11,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -// nolint:revive // Many unsued function arguments in this file by design. package tsdb import ( diff --git a/tsdb/wal.go b/tsdb/wal.go index 3a410fb636..af83127bba 100644 --- a/tsdb/wal.go +++ b/tsdb/wal.go @@ -11,7 +11,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -// nolint:revive // Many unsued function arguments in this file by design. 
package tsdb import ( diff --git a/tsdb/wlog/reader_test.go b/tsdb/wlog/reader_test.go index 309bee755e..9d2ec50d22 100644 --- a/tsdb/wlog/reader_test.go +++ b/tsdb/wlog/reader_test.go @@ -541,7 +541,7 @@ func TestReaderData(t *testing.T) { require.NoError(t, err) reader := fn(sr) - for reader.Next() { // nolint:revive + for reader.Next() { } require.NoError(t, reader.Err()) diff --git a/tsdb/wlog/wlog.go b/tsdb/wlog/wlog.go index fd65fca07a..f62817e19a 100644 --- a/tsdb/wlog/wlog.go +++ b/tsdb/wlog/wlog.go @@ -971,7 +971,6 @@ type segmentBufReader struct { off int // Offset of read data into current segment. } -// nolint:revive // TODO: Consider exporting segmentBufReader func NewSegmentBufReader(segs ...*Segment) *segmentBufReader { if len(segs) == 0 { return &segmentBufReader{} @@ -983,7 +982,6 @@ func NewSegmentBufReader(segs ...*Segment) *segmentBufReader { } } -// nolint:revive func NewSegmentBufReaderWithOffset(offset int, segs ...*Segment) (sbr *segmentBufReader, err error) { if offset == 0 || len(segs) == 0 { return NewSegmentBufReader(segs...), nil diff --git a/tsdb/wlog/wlog_test.go b/tsdb/wlog/wlog_test.go index e954a6435c..ba2a506981 100644 --- a/tsdb/wlog/wlog_test.go +++ b/tsdb/wlog/wlog_test.go @@ -163,7 +163,7 @@ func TestWALRepair_ReadingError(t *testing.T) { sr := NewSegmentBufReader(s) require.NoError(t, err) r := NewReader(sr) - for r.Next() { // nolint:revive + for r.Next() { } // Close the segment so we don't break things on Windows. diff --git a/util/annotations/annotations.go b/util/annotations/annotations.go index 52cfb114b1..9d0b11a089 100644 --- a/util/annotations/annotations.go +++ b/util/annotations/annotations.go @@ -50,6 +50,9 @@ func (a *Annotations) Add(err error) Annotations { // the first in-place, and returns the merged first Annotation for convenience. func (a *Annotations) Merge(aa Annotations) Annotations { if *a == nil { + if aa == nil { + return nil + } *a = Annotations{} } for key, val := range aa { @@ -91,7 +94,6 @@ func (a Annotations) AsStrings(query string, maxAnnos int) []string { return arr } -//nolint:revive // Ignore ST1012 var ( // Currently there are only 2 types, warnings and info. // For now, info are visually identical with warnings as we have not updated @@ -122,6 +124,10 @@ func (e annoErr) Error() string { return fmt.Sprintf("%s (%s)", e.Err, e.PositionRange.StartPosInput(e.Query, 0)) } +func (e annoErr) Unwrap() error { + return e.Err +} + // NewInvalidQuantileWarning is used when the user specifies an invalid quantile // value, i.e. a float that is outside the range [0, 1] or NaN. func NewInvalidQuantileWarning(q float64, pos posrange.PositionRange) annoErr { diff --git a/util/treecache/treecache.go b/util/treecache/treecache.go index bece9d5c83..06356bb0bb 100644 --- a/util/treecache/treecache.go +++ b/util/treecache/treecache.go @@ -116,7 +116,7 @@ func (tc *ZookeeperTreeCache) Stop() { tc.stop <- struct{}{} go func() { // Drain tc.head.events so that go routines can make progress and exit. - for range tc.head.events { // nolint:revive + for range tc.head.events { } }() go func() { diff --git a/util/zeropool/pool_test.go b/util/zeropool/pool_test.go index 507687886f..638a035885 100644 --- a/util/zeropool/pool_test.go +++ b/util/zeropool/pool_test.go @@ -149,13 +149,13 @@ func BenchmarkSyncPoolNewPointer(b *testing.B) { // Warmup item := pool.Get().(*[]byte) - pool.Put(item) //nolint:staticcheck // This allocates. 
+ pool.Put(item) b.ResetTimer() for i := 0; i < b.N; i++ { item := pool.Get().(*[]byte) buf := *item - pool.Put(&buf) //nolint:staticcheck // New pointer. + pool.Put(&buf) } } diff --git a/web/api/v1/api.go b/web/api/v1/api.go index 1a54f23a61..79569a657d 100644 --- a/web/api/v1/api.go +++ b/web/api/v1/api.go @@ -1373,6 +1373,11 @@ func (api *API) rules(r *http.Request) apiFuncResult { returnAlerts := typ == "" || typ == "alert" returnRecording := typ == "" || typ == "record" + excludeAlerts, err := parseExcludeAlerts(r) + if err != nil { + return invalidParamError(err, "exclude_alerts") + } + rgs := make([]*RuleGroup, 0, len(ruleGroups)) for _, grp := range ruleGroups { if len(rgSet) > 0 { @@ -1414,6 +1419,10 @@ func (api *API) rules(r *http.Request) apiFuncResult { if !returnAlerts { break } + var activeAlerts []*Alert + if !excludeAlerts { + activeAlerts = rulesAlertsToAPIAlerts(rule.ActiveAlerts()) + } enrichedRule = AlertingRule{ State: rule.State().String(), Name: rule.Name(), @@ -1422,7 +1431,7 @@ func (api *API) rules(r *http.Request) apiFuncResult { KeepFiringFor: rule.KeepFiringFor().Seconds(), Labels: rule.Labels(), Annotations: rule.Annotations(), - Alerts: rulesAlertsToAPIAlerts(rule.ActiveAlerts()), + Alerts: activeAlerts, Health: rule.Health(), LastError: lastError, EvaluationTime: rule.GetEvaluationDuration().Seconds(), @@ -1462,6 +1471,20 @@ func (api *API) rules(r *http.Request) apiFuncResult { return apiFuncResult{res, nil, nil, nil} } +func parseExcludeAlerts(r *http.Request) (bool, error) { + excludeAlertsParam := strings.ToLower(r.URL.Query().Get("exclude_alerts")) + + if excludeAlertsParam == "" { + return false, nil + } + + excludeAlerts, err := strconv.ParseBool(excludeAlertsParam) + if err != nil { + return false, fmt.Errorf("error converting exclude_alerts: %w", err) + } + return excludeAlerts, nil +} + type prometheusConfig struct { YAML string `json:"yaml"` } diff --git a/web/api/v1/api_test.go b/web/api/v1/api_test.go index 475b4bab54..320d174fce 100644 --- a/web/api/v1/api_test.go +++ b/web/api/v1/api_test.go @@ -209,10 +209,12 @@ func (t testAlertmanagerRetriever) toFactory() func(context.Context) Alertmanage } type rulesRetrieverMock struct { - testing *testing.T + alertingRules []*rules.AlertingRule + ruleGroups []*rules.Group + testing *testing.T } -func (m rulesRetrieverMock) AlertingRules() []*rules.AlertingRule { +func (m *rulesRetrieverMock) CreateAlertingRules() { expr1, err := parser.ParseExpr(`absent(test_metric3) != 1`) if err != nil { m.testing.Fatalf("unable to parse alert expression: %s", err) @@ -222,6 +224,11 @@ func (m rulesRetrieverMock) AlertingRules() []*rules.AlertingRule { m.testing.Fatalf("Unable to parse alert expression: %s", err) } + expr3, err := parser.ParseExpr(`vector(1)`) + if err != nil { + m.testing.Fatalf("Unable to parse alert expression: %s", err) + } + rule1 := rules.NewAlertingRule( "test_metric3", expr1, @@ -246,15 +253,29 @@ func (m rulesRetrieverMock) AlertingRules() []*rules.AlertingRule { true, log.NewNopLogger(), ) + rule3 := rules.NewAlertingRule( + "test_metric5", + expr3, + time.Second, + 0, + labels.FromStrings("name", "tm5"), + labels.Labels{}, + labels.FromStrings("name", "tm5"), + "", + false, + log.NewNopLogger(), + ) + var r []*rules.AlertingRule r = append(r, rule1) r = append(r, rule2) - return r + r = append(r, rule3) + m.alertingRules = r } -func (m rulesRetrieverMock) RuleGroups() []*rules.Group { - var ar rulesRetrieverMock - arules := ar.AlertingRules() +func (m *rulesRetrieverMock) 
CreateRuleGroups() { + m.CreateAlertingRules() + arules := m.AlertingRules() storage := teststorage.New(m.testing) defer storage.Close() @@ -271,6 +292,7 @@ func (m rulesRetrieverMock) RuleGroups() []*rules.Group { Appendable: storage, Context: context.Background(), Logger: log.NewNopLogger(), + NotifyFunc: func(ctx context.Context, expr string, alerts ...*rules.Alert) {}, } var r []rules.Rule @@ -294,10 +316,18 @@ func (m rulesRetrieverMock) RuleGroups() []*rules.Group { ShouldRestore: false, Opts: opts, }) - return []*rules.Group{group} + m.ruleGroups = []*rules.Group{group} } -func (m rulesRetrieverMock) toFactory() func(context.Context) RulesRetriever { +func (m *rulesRetrieverMock) AlertingRules() []*rules.AlertingRule { + return m.alertingRules +} + +func (m *rulesRetrieverMock) RuleGroups() []*rules.Group { + return m.ruleGroups +} + +func (m *rulesRetrieverMock) toFactory() func(context.Context) RulesRetriever { return func(context.Context) RulesRetriever { return m } } @@ -380,12 +410,14 @@ func TestEndpoints(t *testing.T) { now := time.Now() t.Run("local", func(t *testing.T) { - var algr rulesRetrieverMock + algr := rulesRetrieverMock{} algr.testing = t - algr.AlertingRules() + algr.CreateAlertingRules() + algr.CreateRuleGroups() - algr.RuleGroups() + g := algr.RuleGroups() + g[0].Eval(context.Background(), time.Now()) testTargetRetriever := setupTestTargetRetriever(t) @@ -442,12 +474,14 @@ func TestEndpoints(t *testing.T) { }) require.NoError(t, err) - var algr rulesRetrieverMock + algr := rulesRetrieverMock{} algr.testing = t - algr.AlertingRules() + algr.CreateAlertingRules() + algr.CreateRuleGroups() - algr.RuleGroups() + g := algr.RuleGroups() + g[0].Eval(context.Background(), time.Now()) testTargetRetriever := setupTestTargetRetriever(t) @@ -1036,6 +1070,36 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E sorter func(interface{}) metadata []targetMetadata exemplars []exemplar.QueryResult + zeroFunc func(interface{}) + } + + rulesZeroFunc := func(i interface{}) { + if i != nil { + v := i.(*RuleDiscovery) + for _, ruleGroup := range v.RuleGroups { + ruleGroup.EvaluationTime = float64(0) + ruleGroup.LastEvaluation = time.Time{} + for k, rule := range ruleGroup.Rules { + switch r := rule.(type) { + case AlertingRule: + r.LastEvaluation = time.Time{} + r.EvaluationTime = float64(0) + r.LastError = "" + r.Health = "ok" + for _, alert := range r.Alerts { + alert.ActiveAt = nil + } + ruleGroup.Rules[k] = r + case RecordingRule: + r.LastEvaluation = time.Time{} + r.EvaluationTime = float64(0) + r.LastError = "" + r.Health = "ok" + ruleGroup.Rules[k] = r + } + } + } + } } tests := []test{ @@ -1988,7 +2052,22 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E { endpoint: api.alerts, response: &AlertDiscovery{ - Alerts: []*Alert{}, + Alerts: []*Alert{ + { + Labels: labels.FromStrings("alertname", "test_metric5", "name", "tm5"), + Annotations: labels.Labels{}, + State: "pending", + Value: "1e+00", + }, + }, + }, + zeroFunc: func(i interface{}) { + if i != nil { + v := i.(*AlertDiscovery) + for _, alert := range v.Alerts { + alert.ActiveAt = nil + } + } }, }, { @@ -2009,7 +2088,7 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E Labels: labels.Labels{}, Annotations: labels.Labels{}, Alerts: []*Alert{}, - Health: "unknown", + Health: "ok", Type: "alerting", }, AlertingRule{ @@ -2020,20 +2099,98 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E Labels: 
labels.Labels{}, Annotations: labels.Labels{}, Alerts: []*Alert{}, - Health: "unknown", + Health: "ok", Type: "alerting", }, + AlertingRule{ + State: "pending", + Name: "test_metric5", + Query: "vector(1)", + Duration: 1, + Labels: labels.FromStrings("name", "tm5"), + Annotations: labels.Labels{}, + Alerts: []*Alert{ + { + Labels: labels.FromStrings("alertname", "test_metric5", "name", "tm5"), + Annotations: labels.Labels{}, + State: "pending", + Value: "1e+00", + }, + }, + Health: "ok", + Type: "alerting", + }, RecordingRule{ Name: "recording-rule-1", Query: "vector(1)", Labels: labels.Labels{}, - Health: "unknown", + Health: "ok", Type: "recording", }, }, }, }, }, + zeroFunc: rulesZeroFunc, + }, + { + endpoint: api.rules, + query: url.Values{ + "exclude_alerts": []string{"true"}, + }, + response: &RuleDiscovery{ + RuleGroups: []*RuleGroup{ + { + Name: "grp", + File: "/path/to/file", + Interval: 1, + Limit: 0, + Rules: []Rule{ + AlertingRule{ + State: "inactive", + Name: "test_metric3", + Query: "absent(test_metric3) != 1", + Duration: 1, + Labels: labels.Labels{}, + Annotations: labels.Labels{}, + Alerts: nil, + Health: "ok", + Type: "alerting", + }, + AlertingRule{ + State: "inactive", + Name: "test_metric4", + Query: "up == 1", + Duration: 1, + Labels: labels.Labels{}, + Annotations: labels.Labels{}, + Alerts: nil, + Health: "ok", + Type: "alerting", + }, + AlertingRule{ + State: "pending", + Name: "test_metric5", + Query: "vector(1)", + Duration: 1, + Labels: labels.FromStrings("name", "tm5"), + Annotations: labels.Labels{}, + Alerts: nil, + Health: "ok", + Type: "alerting", + }, + RecordingRule{ + Name: "recording-rule-1", + Query: "vector(1)", + Labels: labels.Labels{}, + Health: "ok", + Type: "recording", + }, + }, + }, + }, + }, + zeroFunc: rulesZeroFunc, }, { endpoint: api.rules, @@ -2056,7 +2213,7 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E Labels: labels.Labels{}, Annotations: labels.Labels{}, Alerts: []*Alert{}, - Health: "unknown", + Health: "ok", Type: "alerting", }, AlertingRule{ @@ -2067,13 +2224,32 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E Labels: labels.Labels{}, Annotations: labels.Labels{}, Alerts: []*Alert{}, - Health: "unknown", + Health: "ok", Type: "alerting", }, + AlertingRule{ + State: "pending", + Name: "test_metric5", + Query: "vector(1)", + Duration: 1, + Labels: labels.FromStrings("name", "tm5"), + Annotations: labels.Labels{}, + Alerts: []*Alert{ + { + Labels: labels.FromStrings("alertname", "test_metric5", "name", "tm5"), + Annotations: labels.Labels{}, + State: "pending", + Value: "1e+00", + }, + }, + Health: "ok", + Type: "alerting", + }, }, }, }, }, + zeroFunc: rulesZeroFunc, }, { endpoint: api.rules, @@ -2092,13 +2268,14 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E Name: "recording-rule-1", Query: "vector(1)", Labels: labels.Labels{}, - Health: "unknown", + Health: "ok", Type: "recording", }, }, }, }, }, + zeroFunc: rulesZeroFunc, }, { endpoint: api.rules, @@ -2119,13 +2296,14 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E Labels: labels.Labels{}, Annotations: labels.Labels{}, Alerts: []*Alert{}, - Health: "unknown", + Health: "ok", Type: "alerting", }, }, }, }, }, + zeroFunc: rulesZeroFunc, }, { endpoint: api.rules, @@ -2151,13 +2329,14 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E Labels: labels.Labels{}, Annotations: labels.Labels{}, Alerts: []*Alert{}, - 
Health: "unknown", + Health: "ok", Type: "alerting", }, }, }, }, }, + zeroFunc: rulesZeroFunc, }, { endpoint: api.queryExemplars, @@ -2696,6 +2875,9 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E assertAPIResponseMetadataLen(t, res.data, test.responseMetadataTotal) } } else { + if test.zeroFunc != nil { + test.zeroFunc(res.data) + } assertAPIResponse(t, res.data, test.response) } }) diff --git a/web/api/v1/errors_test.go b/web/api/v1/errors_test.go index 4673af201e..38ca5b62c0 100644 --- a/web/api/v1/errors_test.go +++ b/web/api/v1/errors_test.go @@ -11,7 +11,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -// nolint:revive // Many unsued function arguments in this file by design. package v1 import ( diff --git a/web/ui/module/codemirror-promql/package.json b/web/ui/module/codemirror-promql/package.json index 1524dc4129..0e62d640ab 100644 --- a/web/ui/module/codemirror-promql/package.json +++ b/web/ui/module/codemirror-promql/package.json @@ -1,6 +1,6 @@ { "name": "@prometheus-io/codemirror-promql", - "version": "0.47.0", + "version": "0.48.0-rc.1", "description": "a CodeMirror mode for the PromQL language", "types": "dist/esm/index.d.ts", "module": "dist/esm/index.js", @@ -29,7 +29,7 @@ }, "homepage": "https://github.com/prometheus/prometheus/blob/main/web/ui/module/codemirror-promql/README.md", "dependencies": { - "@prometheus-io/lezer-promql": "0.47.0", + "@prometheus-io/lezer-promql": "0.48.0-rc.1", "lru-cache": "^7.18.3" }, "devDependencies": { diff --git a/web/ui/module/lezer-promql/package.json b/web/ui/module/lezer-promql/package.json index 92dc7a7a46..d38b3288de 100644 --- a/web/ui/module/lezer-promql/package.json +++ b/web/ui/module/lezer-promql/package.json @@ -1,6 +1,6 @@ { "name": "@prometheus-io/lezer-promql", - "version": "0.47.0", + "version": "0.48.0-rc.1", "description": "lezer-based PromQL grammar", "main": "dist/index.cjs", "type": "module", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index eabb75cb1f..f951aba10d 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -1,12 +1,12 @@ { "name": "prometheus-io", - "version": "0.46.0", + "version": "0.48.0-rc.1", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "prometheus-io", - "version": "0.46.0", + "version": "0.48.0-rc.1", "workspaces": [ "react-app", "module/*" @@ -30,10 +30,10 @@ }, "module/codemirror-promql": { "name": "@prometheus-io/codemirror-promql", - "version": "0.47.0", + "version": "0.48.0-rc.1", "license": "Apache-2.0", "dependencies": { - "@prometheus-io/lezer-promql": "0.47.0", + "@prometheus-io/lezer-promql": "0.48.0-rc.1", "lru-cache": "^7.18.3" }, "devDependencies": { @@ -70,7 +70,7 @@ }, "module/lezer-promql": { "name": "@prometheus-io/lezer-promql", - "version": "0.47.0", + "version": "0.48.0-rc.1", "license": "Apache-2.0", "devDependencies": { "@lezer/generator": "^1.2.3", @@ -20764,7 +20764,7 @@ }, "react-app": { "name": "@prometheus-io/app", - "version": "0.47.0", + "version": "0.48.0-rc.1", "dependencies": { "@codemirror/autocomplete": "^6.7.1", "@codemirror/commands": "^6.2.4", @@ -20782,7 +20782,7 @@ "@lezer/lr": "^1.3.6", "@nexucis/fuzzy": "^0.4.1", "@nexucis/kvsearch": "^0.8.1", - "@prometheus-io/codemirror-promql": "0.47.0", + "@prometheus-io/codemirror-promql": "0.48.0-rc.1", "bootstrap": "^4.6.2", "css.escape": "^1.5.1", "downshift": "^7.6.0", @@ -23422,7 +23422,7 @@ "@lezer/lr": "^1.3.6", "@nexucis/fuzzy": "^0.4.1", "@nexucis/kvsearch": 
"^0.8.1", - "@prometheus-io/codemirror-promql": "0.47.0", + "@prometheus-io/codemirror-promql": "0.48.0-rc.1", "@testing-library/react-hooks": "^7.0.2", "@types/enzyme": "^3.10.13", "@types/flot": "0.0.32", @@ -23486,7 +23486,7 @@ "@lezer/common": "^1.0.3", "@lezer/highlight": "^1.1.6", "@lezer/lr": "^1.3.6", - "@prometheus-io/lezer-promql": "0.47.0", + "@prometheus-io/lezer-promql": "0.48.0-rc.1", "isomorphic-fetch": "^3.0.0", "lru-cache": "^7.18.3", "nock": "^13.3.1" diff --git a/web/ui/package.json b/web/ui/package.json index a6b08715f5..76b5ad2a89 100644 --- a/web/ui/package.json +++ b/web/ui/package.json @@ -28,5 +28,5 @@ "ts-jest": "^29.1.0", "typescript": "^4.9.5" }, - "version": "0.46.0" + "version": "0.48.0-rc.1" } diff --git a/web/ui/react-app/package.json b/web/ui/react-app/package.json index dd0a95640f..3d1de883a5 100644 --- a/web/ui/react-app/package.json +++ b/web/ui/react-app/package.json @@ -1,6 +1,6 @@ { "name": "@prometheus-io/app", - "version": "0.47.0", + "version": "0.48.0-rc.1", "private": true, "dependencies": { "@codemirror/autocomplete": "^6.7.1", @@ -19,7 +19,7 @@ "@lezer/lr": "^1.3.6", "@nexucis/fuzzy": "^0.4.1", "@nexucis/kvsearch": "^0.8.1", - "@prometheus-io/codemirror-promql": "0.47.0", + "@prometheus-io/codemirror-promql": "0.48.0-rc.1", "bootstrap": "^4.6.2", "css.escape": "^1.5.1", "downshift": "^7.6.0",