Merge branch 'upstream/main' into dimitar/sync-with-upstream-main

Commit dba5006d7a (mirror of https://github.com/prometheus/prometheus.git).
.golangci.yml

@@ -18,6 +18,7 @@ linters:
     - gofumpt
     - goimports
     - misspell
+    - nolintlint
     - predeclared
     - revive
     - unconvert
@@ -32,15 +33,9 @@ issues:
     - path: _test.go
       linters:
         - errcheck
-    - path: discovery/
-      linters:
-        - errorlint
     - path: scrape/
       linters:
         - errorlint
-    - path: storage/
-      linters:
-        - errorlint
     - path: tsdb/
       linters:
         - errorlint
CHANGELOG.md (35 lines changed)

@@ -1,5 +1,40 @@
 # Changelog

+## 2.48.0-rc.1 / 2023-10-24
+
+* [BUGFIX] PromQL: Reduce inefficiency introduced by warnings/annotations and temporarily remove possible non-counter warnings. #13012
+
+## 2.48.0-rc.0 / 2023-10-10
+
+* [CHANGE] Remote-write: respect Retry-After header on 5xx errors. #12677
+* [FEATURE] Alerting: Add AWS SigV4 authentication support for Alertmanager endpoints. #12774
+* [FEATURE] Promtool: Add support for histograms in the TSDB dump command. #12775
+* [FEATURE] PromQL: Add warnings (and annotations) to PromQL query results. #12152 #12982 #12988
+* [FEATURE] Remote-write: Add Azure AD OAuth authentication support for remote write requests. #12572
+* [ENHANCEMENT] Remote-write: Add a header to count retried remote write requests. #12729
+* [ENHANCEMENT] TSDB: Improve query performance by re-using iterator when moving between series. #12757
+* [ENHANCEMENT] UI: Move /targets page discovered labels to expandable section #12824
+* [ENHANCEMENT] TSDB: Optimize WBL loading by not sending empty buffers over channel. #12808
+* [ENHANCEMENT] TSDB: Reply WBL mmap markers concurrently. #12801
+* [ENHANCEMENT] Promtool: Add support for specifying series matchers in the TSDB analyze command. #12842
+* [ENHANCEMENT] PromQL: Prevent Prometheus from overallocating memory on subquery with large amount of steps. #12734
+* [ENHANCEMENT] PromQL: Add warning when monotonicity is forced in the input to histogram_quantile. #12931
+* [ENHANCEMENT] Scraping: Optimize sample appending by reducing garbage. #12939
+* [ENHANCEMENT] Storage: Reduce memory allocations in queries that merge series sets. #12938
+* [ENHANCEMENT] UI: Show group interval in rules display. #12943
+* [ENHANCEMENT] Scraping: Save memory when scraping by delaying creation of buffer. #12953
+* [ENHANCEMENT] Agent: Allow ingestion of out-of-order samples. #12897
+* [ENHANCEMENT] Promtool: Improve support for native histograms in TSDB analyze command. #12869
+* [BUGFIX] SD: Ensure that discovery managers are properly canceled. #10569
+* [BUGFIX] TSDB: Fix PostingsForMatchers race with creating new series. #12558
+* [BUGFIX] TSDB: Fix handling of explicit counter reset header in histograms. #12772
+* [BUGFIX] SD: Validate HTTP client configuration in HTTP, EC2, Azure, Uyuni, PuppetDB, and Lightsail SDs. #12762 #12811 #12812 #12815 #12814 #12816
+* [BUGFIX] TSDB: Fix counter reset edgecases causing native histogram panics. #12838
+* [BUGFIX] TSDB: Fix duplicate sample detection at chunk size limit. #12874
+* [BUGFIX] Promtool: Fix errors not being reported in check rules command. #12715
+* [BUGFIX] TSDB: Avoid panics reported in logs when head initialization takes a long time. #12876
+* [BUGFIX] TSDB: Ensure that WBL is repaired when possible. #12406
+
 ## 2.47.1 / 2023-10-04

 * [BUGFIX] Fix duplicate sample detection at chunk size limit #12874
@@ -7,7 +7,7 @@ Julien Pivotto (<roidelapluie@prometheus.io> / @roidelapluie) and Levi Harrison
 * `discovery`
   * `k8s`: Frederic Branczyk (<fbranczyk@gmail.com> / @brancz)
 * `documentation`
-  * `prometheus-mixin`: Björn Rabenstein (<beorn@grafana.com> / @beorn7)
+  * `prometheus-mixin`: Matthias Loibl (<mail@matthiasloibl.com> / @metalmatze)
 * `storage`
   * `remote`: Chris Marchbanks (<csmarchbanks@gmail.com> / @csmarchbanks), Callum Styan (<callumstyan@gmail.com> / @cstyan), Bartłomiej Płotka (<bwplotka@gmail.com> / @bwplotka), Tom Wilkie (<tom.wilkie@gmail.com> / @tomwilkie)
 * `tsdb`: Ganesh Vernekar (<ganesh@grafana.com> / @codesome), Bartłomiej Płotka (<bwplotka@gmail.com> / @bwplotka), Jesús Vázquez (<jesus.vazquez@grafana.com> / @jesusvazquez)
@@ -12,7 +12,6 @@
 // limitations under the License.

 // The main package for the Prometheus server executable.
-// nolint:revive // Many unsued function arguments in this file by design.
 package main

 import (
@@ -44,7 +44,7 @@ func sortSamples(samples []backfillSample) {
     })
 }

-func queryAllSeries(t testing.TB, q storage.Querier, expectedMinTime, expectedMaxTime int64) []backfillSample { // nolint:revive
+func queryAllSeries(t testing.TB, q storage.Querier, expectedMinTime, expectedMaxTime int64) []backfillSample {
     ss := q.Select(context.Background(), false, nil, labels.MustNewMatcher(labels.MatchRegexp, "", ".*"))
     samples := []backfillSample{}
     for ss.Next() {
@@ -411,7 +411,6 @@ func checkExperimental(f bool) {
     }
 }

-// nolint:revive
 var lintError = fmt.Errorf("lint error")

 type lintConfig struct {
@@ -35,7 +35,7 @@ type mockQueryRangeAPI struct {
     samples model.Matrix
 }

-func (mockAPI mockQueryRangeAPI) QueryRange(_ context.Context, query string, r v1.Range, opts ...v1.Option) (model.Value, v1.Warnings, error) { // nolint:revive
+func (mockAPI mockQueryRangeAPI) QueryRange(_ context.Context, query string, r v1.Range, opts ...v1.Option) (model.Value, v1.Warnings, error) {
     return mockAPI.samples, v1.Warnings{}, nil
 }

@@ -563,6 +563,8 @@ type ScrapeConfig struct {
     HonorLabels bool `yaml:"honor_labels,omitempty"`
     // Indicator whether the scraped timestamps should be respected.
     HonorTimestamps bool `yaml:"honor_timestamps"`
+    // Indicator whether to track the staleness of the scraped timestamps.
+    TrackTimestampsStaleness bool `yaml:"track_timestamps_staleness"`
     // A set of query parameters with which the target is scraped.
     Params url.Values `yaml:"params,omitempty"`
     // How frequently to scrape the targets of this scrape config.
@@ -11,7 +11,6 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-// nolint:revive // Many legitimately empty blocks in this file.
 package kubernetes

 import (
@@ -15,6 +15,7 @@ package kubernetes

 import (
     "context"
+    "errors"
     "fmt"
     "net"
     "strconv"
@@ -183,14 +184,14 @@ func (e *EndpointSlice) Run(ctx context.Context, ch chan<- []*targetgroup.Group)
         cacheSyncs = append(cacheSyncs, e.nodeInf.HasSynced)
     }
     if !cache.WaitForCacheSync(ctx.Done(), cacheSyncs...) {
-        if ctx.Err() != context.Canceled {
+        if !errors.Is(ctx.Err(), context.Canceled) {
             level.Error(e.logger).Log("msg", "endpointslice informer unable to sync cache")
         }
         return
     }

     go func() {
-        for e.process(ctx, ch) { // nolint:revive
+        for e.process(ctx, ch) {
         }
     }()

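The `errors.Is` change above matters once an error may arrive wrapped: a direct `!=` comparison only matches the exact sentinel value, while `errors.Is` unwraps the error chain. A standalone sketch (not code from this repository) illustrating the difference:

```go
package main

import (
	"context"
	"errors"
	"fmt"
)

func main() {
	// A wrapped cancellation error, as produced by fmt.Errorf with %w.
	err := fmt.Errorf("informer sync failed: %w", context.Canceled)

	fmt.Println(err == context.Canceled)          // false: values differ
	fmt.Println(errors.Is(err, context.Canceled)) // true: unwraps the chain
}
```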
@@ -88,7 +88,7 @@ func (i *Ingress) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
     }

     go func() {
-        for i.process(ctx, ch) { // nolint:revive
+        for i.process(ctx, ch) {
         }
     }()

@@ -96,7 +96,7 @@ func (n *Node) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
     }

     go func() {
-        for n.process(ctx, ch) { // nolint:revive
+        for n.process(ctx, ch) {
         }
     }()

@@ -131,7 +131,7 @@ func (p *Pod) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
     }

     go func() {
-        for p.process(ctx, ch) { // nolint:revive
+        for p.process(ctx, ch) {
         }
     }()

@@ -91,7 +91,7 @@ func (s *Service) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
     }

     go func() {
-        for s.process(ctx, ch) { // nolint:revive
+        for s.process(ctx, ch) {
         }
     }()

@@ -193,7 +193,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
     }
     for _, pathUpdate := range d.pathUpdates {
         // Drain event channel in case the treecache leaks goroutines otherwise.
-        for range pathUpdate { // nolint:revive
+        for range pathUpdate {
         }
     }
     d.conn.Close()
@@ -222,6 +222,14 @@ job_name: <job_name>
 # by the target will be ignored.
 [ honor_timestamps: <boolean> | default = true ]

+# track_timestamps_staleness controls whether Prometheus tracks staleness of
+# the metrics that have an explicit timestamps present in scraped data.
+#
+# If track_timestamps_staleness is set to "true", a staleness marker will be
+# inserted in the TSDB when a metric is no longer present or the target
+# is down.
+[ track_timestamps_staleness: <boolean> | default = false ]
+
 # Configures the protocol scheme used for requests.
 [ scheme: <scheme> | default = http ]

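Taken together, the new setting is enabled per scrape job. A minimal sketch of such a job (the job name and target below are hypothetical placeholders, not part of this change):

```yaml
scrape_configs:
  - job_name: pushed_metrics          # hypothetical job name
    honor_timestamps: true
    # Opt in to staleness markers for samples that carry explicit
    # timestamps, so their series end promptly when the target stops
    # exposing them or goes down.
    track_timestamps_staleness: true
    static_configs:
      - targets: ["localhost:9091"]   # hypothetical target
```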
@@ -679,6 +679,7 @@ URL query parameters:
 - `rule_name[]=<string>`: only return rules with the given rule name. If the parameter is repeated, rules with any of the provided names are returned. If we've filtered out all the rules of a group, the group is not returned. When the parameter is absent or empty, no filtering is done.
 - `rule_group[]=<string>`: only return rules with the given rule group name. If the parameter is repeated, rules with any of the provided rule group names are returned. When the parameter is absent or empty, no filtering is done.
 - `file[]=<string>`: only return rules with the given filepath. If the parameter is repeated, rules with any of the provided filepaths are returned. When the parameter is absent or empty, no filtering is done.
+- `exclude_alerts=<bool>`: only return rules, do not return active alerts.

 ```json
 $ curl http://localhost:9090/api/v1/rules
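For instance, to list rules without their active alerts one might call the endpoint with the new parameter set (same default host and port as the surrounding examples):

```
$ curl 'http://localhost:9090/api/v1/rules?exclude_alerts=true'
```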
@@ -1307,4 +1308,4 @@ Enable the OTLP receiver by the feature flag
 `--enable-feature=otlp-write-receiver`. When enabled, the OTLP receiver
 endpoint is `/api/v1/otlp/v1/metrics`.

 *New in v2.47*
@@ -1173,9 +1173,7 @@ func (ev *evaluator) rangeEval(prepSeries func(labels.Labels, *EvalSeriesHelper)
             bufHelpers[i] = make([]EvalSeriesHelper, len(matrixes[i]))

             for si, series := range matrixes[i] {
-                h := seriesHelpers[i][si]
-                prepSeries(series.Metric, &h)
-                seriesHelpers[i][si] = h
+                prepSeries(series.Metric, &seriesHelpers[i][si])
             }
         }
     }
@@ -2028,7 +2026,7 @@ func (ev *evaluator) matrixIterSlice(
         // (b) the number of samples is relatively small.
         // so a linear search will be as fast as a binary search.
         var drop int
-        for drop = 0; floats[drop].T < mint; drop++ { // nolint:revive
+        for drop = 0; floats[drop].T < mint; drop++ {
         }
         ev.currentSamples -= drop
         copy(floats, floats[drop:])
@@ -2050,7 +2048,7 @@ func (ev *evaluator) matrixIterSlice(
         // (b) the number of samples is relatively small.
         // so a linear search will be as fast as a binary search.
         var drop int
-        for drop = 0; histograms[drop].T < mint; drop++ { // nolint:revive
+        for drop = 0; histograms[drop].T < mint; drop++ {
         }
         copy(histograms, histograms[drop:])
         histograms = histograms[:len(histograms)-drop]
@@ -1657,7 +1657,6 @@ func TestRecoverEvaluatorRuntime(t *testing.T) {

     // Cause a runtime panic.
     var a []int
-    //nolint:govet
     a[123] = 1
 }

@@ -11,7 +11,6 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-// nolint:revive // Many unsued function arguments in this file by design.
 package promql

 import (
@@ -55,7 +55,6 @@ type Statement interface {
     Node

     // PromQLStmt ensures that no other type accidentally implements the interface
-    // nolint:unused
     PromQLStmt()
 }

@@ -11,7 +11,6 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-// nolint:revive // Many legitimately empty blocks in this file.
 package parser

 import (
@@ -72,7 +72,6 @@ func WithFunctions(functions map[string]*Function) Opt {
 }

 // NewParser returns a new parser.
-// nolint:revive
 func NewParser(input string, opts ...Opt) *parser {
     p := parserPool.Get().(*parser)

@@ -660,9 +659,9 @@ func (p *parser) checkAST(node Node) (typ ValueType) {
     // This is made a function instead of a variable, so it is lazily evaluated on demand.
     opRange := func() (r posrange.PositionRange) {
         // Remove whitespace at the beginning and end of the range.
-        for r.Start = n.LHS.PositionRange().End; isSpace(rune(p.lex.input[r.Start])); r.Start++ { // nolint:revive
+        for r.Start = n.LHS.PositionRange().End; isSpace(rune(p.lex.input[r.Start])); r.Start++ {
         }
-        for r.End = n.RHS.PositionRange().Start - 1; isSpace(rune(p.lex.input[r.End])); r.End-- { // nolint:revive
+        for r.End = n.RHS.PositionRange().Start - 1; isSpace(rune(p.lex.input[r.End])); r.End-- {
         }
         return
     }
@@ -4061,7 +4061,6 @@ func TestRecoverParserRuntime(t *testing.T) {
     defer p.recover(&err)
     // Cause a runtime panic.
     var a []int
-    //nolint:govet
     a[123] = 1
 }

scrape/scrape.go (167 lines changed)

@@ -95,18 +95,19 @@ type labelLimits struct {
 }

 type scrapeLoopOptions struct {
     target                   *Target
     scraper                  scraper
     sampleLimit              int
     bucketLimit              int
     labelLimits              *labelLimits
     honorLabels              bool
     honorTimestamps          bool
+    trackTimestampsStaleness bool
     interval                 time.Duration
     timeout                  time.Duration
     scrapeClassicHistograms  bool
     mrc                      []*relabel.Config
     cache                    *scrapeCache
 }

 const maxAheadTime = 10 * time.Minute
@@ -160,6 +161,7 @@ func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, offsetSeed
         cache,
         offsetSeed,
         opts.honorTimestamps,
+        opts.trackTimestampsStaleness,
         opts.sampleLimit,
         opts.bucketLimit,
         opts.labelLimits,
@@ -270,9 +272,10 @@ func (sp *scrapePool) reload(cfg *config.ScrapeConfig) error {
             labelNameLengthLimit:  int(sp.config.LabelNameLengthLimit),
             labelValueLengthLimit: int(sp.config.LabelValueLengthLimit),
         }
         honorLabels              = sp.config.HonorLabels
         honorTimestamps          = sp.config.HonorTimestamps
+        trackTimestampsStaleness = sp.config.TrackTimestampsStaleness
         mrc                      = sp.config.MetricRelabelConfigs
     )

     sp.targetMtx.Lock()
@@ -298,17 +301,18 @@ func (sp *scrapePool) reload(cfg *config.ScrapeConfig) error {
             acceptHeader: acceptHeader(cfg.ScrapeProtocols),
         }
         newLoop = sp.newLoop(scrapeLoopOptions{
             target:                   t,
             scraper:                  s,
             sampleLimit:              sampleLimit,
             bucketLimit:              bucketLimit,
             labelLimits:              labelLimits,
             honorLabels:              honorLabels,
             honorTimestamps:          honorTimestamps,
+            trackTimestampsStaleness: trackTimestampsStaleness,
             mrc:                      mrc,
             cache:                    cache,
             interval:                 interval,
             timeout:                  timeout,
         })
     )
     if err != nil {
@@ -396,10 +400,11 @@ func (sp *scrapePool) sync(targets []*Target) {
             labelNameLengthLimit:  int(sp.config.LabelNameLengthLimit),
             labelValueLengthLimit: int(sp.config.LabelValueLengthLimit),
         }
         honorLabels              = sp.config.HonorLabels
         honorTimestamps          = sp.config.HonorTimestamps
+        trackTimestampsStaleness = sp.config.TrackTimestampsStaleness
         mrc                      = sp.config.MetricRelabelConfigs
         scrapeClassicHistograms  = sp.config.ScrapeClassicHistograms
     )

     sp.targetMtx.Lock()
@@ -421,17 +426,18 @@ func (sp *scrapePool) sync(targets []*Target) {
             metrics: sp.metrics,
         }
         l := sp.newLoop(scrapeLoopOptions{
             target:                   t,
             scraper:                  s,
             sampleLimit:              sampleLimit,
             bucketLimit:              bucketLimit,
             labelLimits:              labelLimits,
             honorLabels:              honorLabels,
             honorTimestamps:          honorTimestamps,
+            trackTimestampsStaleness: trackTimestampsStaleness,
             mrc:                      mrc,
             interval:                 interval,
             timeout:                  timeout,
             scrapeClassicHistograms:  scrapeClassicHistograms,
         })
         if err != nil {
             l.setForcedError(err)
@@ -750,21 +756,22 @@ type cacheEntry struct {
 }

 type scrapeLoop struct {
     scraper                  scraper
     l                        log.Logger
     cache                    *scrapeCache
     lastScrapeSize           int
     buffers                  *pool.Pool
     offsetSeed               uint64
     honorTimestamps          bool
+    trackTimestampsStaleness bool
     forcedErr                error
     forcedErrMtx             sync.Mutex
     sampleLimit              int
     bucketLimit              int
     labelLimits              *labelLimits
     interval                 time.Duration
     timeout                  time.Duration
     scrapeClassicHistograms  bool

     appender func(ctx context.Context) storage.Appender
     sampleMutator labelsMutator
@@ -1046,6 +1053,7 @@ func newScrapeLoop(ctx context.Context,
     cache *scrapeCache,
     offsetSeed uint64,
     honorTimestamps bool,
+    trackTimestampsStaleness bool,
     sampleLimit int,
     bucketLimit int,
     labelLimits *labelLimits,
@@ -1080,27 +1088,28 @@ func newScrapeLoop(ctx context.Context,
     }

     sl := &scrapeLoop{
         scraper:                  sc,
         buffers:                  buffers,
         cache:                    cache,
         appender:                 appender,
         sampleMutator:            sampleMutator,
         reportSampleMutator:      reportSampleMutator,
         stopped:                  make(chan struct{}),
         offsetSeed:               offsetSeed,
         l:                        l,
         parentCtx:                ctx,
         appenderCtx:              appenderCtx,
         honorTimestamps:          honorTimestamps,
+        trackTimestampsStaleness: trackTimestampsStaleness,
         sampleLimit:              sampleLimit,
         bucketLimit:              bucketLimit,
         labelLimits:              labelLimits,
         interval:                 interval,
         timeout:                  timeout,
         scrapeClassicHistograms:  scrapeClassicHistograms,
         reportExtraMetrics:       reportExtraMetrics,
         appendMetadataToWAL:      appendMetadataToWAL,
         metrics:                  metrics,
     }
     sl.ctx, sl.cancel = context.WithCancel(ctx)

@@ -1547,7 +1556,7 @@ loop:
         }

         if !ok {
-            if parsedTimestamp == nil {
+            if parsedTimestamp == nil || sl.trackTimestampsStaleness {
                 // Bypass staleness logic if there is an explicit timestamp.
                 sl.cache.trackStaleness(hash, lset)
             }
@@ -1628,7 +1637,7 @@ loop:
 func (sl *scrapeLoop) checkAddError(ce *cacheEntry, met []byte, tp *int64, err error, sampleLimitErr, bucketLimitErr *error, appErrs *appendErrors) (bool, error) {
     switch errors.Cause(err) {
     case nil:
-        if tp == nil && ce != nil {
+        if (tp == nil || sl.trackTimestampsStaleness) && ce != nil {
             sl.cache.trackStaleness(ce.hash, ce.lset)
         }
         return true, nil
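Read together, these two hunks widen the staleness gate: previously a sample took part in staleness tracking only when it carried no explicit timestamp; with `track_timestamps_staleness` enabled it always does. A minimal sketch of the condition (hypothetical helper; the real scrape loop inlines this check as shown above):

```go
package main

import "fmt"

// shouldTrackStaleness mirrors the gating condition from the hunks above.
// parsedTimestamp is nil when the scraped sample had no explicit timestamp.
func shouldTrackStaleness(parsedTimestamp *int64, trackTimestampsStaleness bool) bool {
	return parsedTimestamp == nil || trackTimestampsStaleness
}

func main() {
	ts := int64(1698192000000)
	fmt.Println(shouldTrackStaleness(nil, false)) // true: no explicit timestamp
	fmt.Println(shouldTrackStaleness(&ts, false)) // false: old behaviour for timestamped samples
	fmt.Println(shouldTrackStaleness(&ts, true))  // true: new opt-in behaviour
}
```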
@@ -650,6 +650,7 @@ func TestScrapeLoopStopBeforeRun(t *testing.T) {
         nopMutator,
         nil, nil, 0,
         true,
+        false,
         0, 0,
         nil,
         1,
@@ -724,6 +725,7 @@ func TestScrapeLoopStop(t *testing.T) {
         nil,
         0,
         true,
+        false,
         0, 0,
         nil,
         10*time.Millisecond,
@@ -802,6 +804,7 @@ func TestScrapeLoopRun(t *testing.T) {
         nil,
         0,
         true,
+        false,
         0, 0,
         nil,
         time.Second,
@@ -859,6 +862,7 @@ func TestScrapeLoopRun(t *testing.T) {
         nil,
         0,
         true,
+        false,
         0, 0,
         nil,
         time.Second,
@@ -920,6 +924,7 @@ func TestScrapeLoopForcedErr(t *testing.T) {
         nil,
         0,
         true,
+        false,
         0, 0,
         nil,
         time.Second,
@@ -980,6 +985,7 @@ func TestScrapeLoopMetadata(t *testing.T) {
         cache,
         0,
         true,
+        false,
         0, 0,
         nil,
         0,
@@ -1039,6 +1045,7 @@ func simpleTestScrapeLoop(t testing.TB) (context.Context, *scrapeLoop) {
         nil,
         0,
         true,
+        false,
         0, 0,
         nil,
         0,
@@ -1101,6 +1108,7 @@ func TestScrapeLoopFailWithInvalidLabelsAfterRelabel(t *testing.T) {
         nil,
         0,
         true,
+        false,
         0, 0,
         nil,
         0,
@@ -1181,6 +1189,7 @@ func TestScrapeLoopRunCreatesStaleMarkersOnFailedScrape(t *testing.T) {
         nil,
         0,
         true,
+        false,
         0, 0,
         nil,
         10*time.Millisecond,
@@ -1246,6 +1255,7 @@ func TestScrapeLoopRunCreatesStaleMarkersOnParseFailure(t *testing.T) {
         nil,
         0,
         true,
+        false,
         0, 0,
         nil,
         10*time.Millisecond,
@@ -1314,6 +1324,7 @@ func TestScrapeLoopCache(t *testing.T) {
         nil,
         0,
         true,
+        false,
         0, 0,
         nil,
         10*time.Millisecond,
@@ -1399,6 +1410,7 @@ func TestScrapeLoopCacheMemoryExhaustionProtection(t *testing.T) {
         nil,
         0,
         true,
+        false,
         0, 0,
         nil,
         10*time.Millisecond,
@@ -1515,6 +1527,7 @@ func TestScrapeLoopAppend(t *testing.T) {
         nil,
         0,
         true,
+        false,
         0, 0,
         nil,
         0,
@@ -1613,7 +1626,7 @@ func TestScrapeLoopAppendForConflictingPrefixedLabels(t *testing.T) {
         },
         nil,
         func(ctx context.Context) storage.Appender { return app },
-        nil, 0, true, 0, 0, nil, 0, 0, false, false, false, nil, false, newTestScrapeMetrics(t),
+        nil, 0, true, false, 0, 0, nil, 0, 0, false, false, false, nil, false, newTestScrapeMetrics(t),
     )
     slApp := sl.appender(context.Background())
     _, _, _, err := sl.append(slApp, []byte(tc.exposedLabels), "", time.Date(2000, 1, 1, 1, 0, 0, 0, time.UTC))
@@ -1644,6 +1657,7 @@ func TestScrapeLoopAppendCacheEntryButErrNotFound(t *testing.T) {
         nil,
         0,
         true,
+        false,
         0, 0,
         nil,
         0,
@@ -1704,6 +1718,7 @@ func TestScrapeLoopAppendSampleLimit(t *testing.T) {
         nil,
         0,
         true,
+        false,
         app.limit, 0,
         nil,
         0,
@@ -1783,6 +1798,7 @@ func TestScrapeLoop_HistogramBucketLimit(t *testing.T) {
         nil,
         0,
         true,
+        false,
         app.limit, 0,
         nil,
         0,
@@ -1883,6 +1899,7 @@ func TestScrapeLoop_ChangingMetricString(t *testing.T) {
         nil,
         0,
         true,
+        false,
         0, 0,
         nil,
         0,
@@ -1933,6 +1950,7 @@ func TestScrapeLoopAppendStaleness(t *testing.T) {
         nil,
         0,
         true,
+        false,
         0, 0,
         nil,
         0,
@@ -1986,6 +2004,7 @@ func TestScrapeLoopAppendNoStalenessIfTimestamp(t *testing.T) {
         nil,
         0,
         true,
+        false,
         0, 0,
         nil,
         0,
@@ -2313,6 +2332,7 @@ metric: <
         nil,
         0,
         true,
+        false,
         0, 0,
         nil,
         0,
@@ -2402,6 +2422,7 @@ func TestScrapeLoopAppendExemplarSeries(t *testing.T) {
         nil,
         0,
         true,
+        false,
         0, 0,
         nil,
         0,
@@ -2456,6 +2477,7 @@ func TestScrapeLoopRunReportsTargetDownOnScrapeError(t *testing.T) {
         nil,
         0,
         true,
+        false,
         0, 0,
         nil,
         10*time.Millisecond,
@@ -2494,6 +2516,7 @@ func TestScrapeLoopRunReportsTargetDownOnInvalidUTF8(t *testing.T) {
         nil,
         0,
         true,
+        false,
         0, 0,
         nil,
         10*time.Millisecond,
@@ -2545,6 +2568,7 @@ func TestScrapeLoopAppendGracefullyIfAmendOrOutOfOrderOrOutOfBounds(t *testing.T
         nil,
         0,
         true,
+        false,
         0, 0,
         nil,
         0,
@@ -2592,6 +2616,7 @@ func TestScrapeLoopOutOfBoundsTimeError(t *testing.T) {
         nil,
         0,
         true,
+        false,
         0, 0,
         nil,
         0,
@@ -2883,6 +2908,7 @@ func TestScrapeLoop_RespectTimestamps(t *testing.T) {
         func(ctx context.Context) storage.Appender { return capp },
         nil, 0,
         true,
+        false,
         0, 0,
         nil,
         0,
@@ -2926,6 +2952,7 @@ func TestScrapeLoop_DiscardTimestamps(t *testing.T) {
         func(ctx context.Context) storage.Appender { return capp },
         nil, 0,
         false,
+        false,
         0, 0,
         nil,
         0,
@@ -2968,6 +2995,7 @@ func TestScrapeLoopDiscardDuplicateLabels(t *testing.T) {
         nil,
         0,
         true,
+        false,
         0, 0,
         nil,
         0,
@@ -3028,6 +3056,7 @@ func TestScrapeLoopDiscardUnnamedMetrics(t *testing.T) {
         nil,
         0,
         true,
+        false,
         0, 0,
         nil,
         0,
@@ -3293,6 +3322,7 @@ func TestScrapeAddFast(t *testing.T) {
         nil,
         0,
         true,
+        false,
         0, 0,
         nil,
         0,
@@ -3381,6 +3411,7 @@ func TestScrapeReportSingleAppender(t *testing.T) {
         nil,
         0,
         true,
+        false,
         0, 0,
         nil,
         10*time.Millisecond,
@@ -3585,6 +3616,7 @@ func TestScrapeLoopLabelLimit(t *testing.T) {
         nil,
         0,
         true,
+        false,
         0, 0,
         &test.labelLimits,
         0,
@@ -3646,3 +3678,68 @@ func TestTargetScrapeIntervalAndTimeoutRelabel(t *testing.T) {
     require.Equal(t, "3s", sp.ActiveTargets()[0].labels.Get(model.ScrapeIntervalLabel))
     require.Equal(t, "750ms", sp.ActiveTargets()[0].labels.Get(model.ScrapeTimeoutLabel))
 }
+
+func TestScrapeLoopRunCreatesStaleMarkersOnFailedScrapeForTimestampedMetrics(t *testing.T) {
+    appender := &collectResultAppender{}
+    var (
+        signal  = make(chan struct{}, 1)
+        scraper = &testScraper{}
+        app     = func(ctx context.Context) storage.Appender { return appender }
+    )
+
+    ctx, cancel := context.WithCancel(context.Background())
+    sl := newScrapeLoop(ctx,
+        scraper,
+        nil, nil,
+        nopMutator,
+        nopMutator,
+        app,
+        nil,
+        0,
+        true,
+        true,
+        0, 0,
+        nil,
+        10*time.Millisecond,
+        time.Hour,
+        false,
+        false,
+        false,
+        nil,
+        false,
+        newTestScrapeMetrics(t),
+    )
+    // Succeed once, several failures, then stop.
+    numScrapes := 0
+
+    scraper.scrapeFunc = func(ctx context.Context, w io.Writer) error {
+        numScrapes++
+
+        switch numScrapes {
+        case 1:
+            w.Write([]byte(fmt.Sprintf("metric_a 42 %d\n", time.Now().UnixNano()/int64(time.Millisecond))))
+            return nil
+        case 5:
+            cancel()
+        }
+        return errors.New("scrape failed")
+    }
+
+    go func() {
+        sl.run(nil)
+        signal <- struct{}{}
+    }()
+
+    select {
+    case <-signal:
+    case <-time.After(5 * time.Second):
+        t.Fatalf("Scrape wasn't stopped.")
+    }
+
+    // 1 successfully scraped sample, 1 stale marker after first fail, 5 report samples for
+    // each scrape successful or not.
+    require.Equal(t, 27, len(appender.resultFloats), "Appended samples not as expected:\n%s", appender)
+    require.Equal(t, 42.0, appender.resultFloats[0].f, "Appended first sample not as expected")
+    require.True(t, value.IsStaleNaN(appender.resultFloats[6].f),
+        "Appended second sample not as expected. Wanted: stale NaN Got: %x", math.Float64bits(appender.resultFloats[6].f))
+}
@@ -145,9 +145,7 @@ func (t *Target) SetMetadataStore(s MetricMetadataStore) {
 func (t *Target) hash() uint64 {
     h := fnv.New64a()

-    //nolint: errcheck
     h.Write([]byte(fmt.Sprintf("%016d", t.labels.Hash())))
-    //nolint: errcheck
     h.Write([]byte(t.URL().String()))

     return h.Sum64()
@@ -284,7 +284,8 @@ func newSampleRing(delta int64, size int, typ chunkenc.ValueType) *sampleRing {
     case chunkenc.ValFloatHistogram:
         r.fhBuf = make([]fhSample, size)
     default:
-        r.iBuf = make([]chunks.Sample, size)
+        // Do not initialize anything because the 1st sample will be
+        // added to one of the other bufs anyway.
     }
     return r
 }
@@ -294,6 +295,12 @@ func (r *sampleRing) reset() {
     r.i = -1
     r.f = 0
     r.bufInUse = noBuf
+
+    // The first sample after the reset will always go to a specialized
+    // buffer. If we later need to change to the interface buffer, we'll
+    // copy from the specialized buffer to the interface buffer. For that to
+    // work properly, we have to reset the interface buffer here, too.
+    r.iBuf = r.iBuf[:0]
 }

 // Returns the current iterator. Invalidates previously returned iterators.
@@ -441,6 +448,7 @@ func (r *sampleRing) add(s chunks.Sample) {
     }
     // The new sample isn't a fit for the already existing
     // ones. Copy the latter into the interface buffer where needed.
+    // The interface buffer is assumed to be of length zero at this point.
     switch r.bufInUse {
     case fBuf:
         for _, s := range r.fBuf {
@@ -90,6 +90,54 @@ func TestSampleRing(t *testing.T) {
     }
 }

+func TestSampleRingMixed(t *testing.T) {
+    h1 := tsdbutil.GenerateTestHistogram(1)
+    h2 := tsdbutil.GenerateTestHistogram(2)
+
+    // With ValNone as the preferred type, nothing should be initialized.
+    r := newSampleRing(10, 2, chunkenc.ValNone)
+    require.Zero(t, len(r.fBuf))
+    require.Zero(t, len(r.hBuf))
+    require.Zero(t, len(r.fhBuf))
+    require.Zero(t, len(r.iBuf))
+
+    // But then mixed adds should work as expected.
+    r.addF(fSample{t: 1, f: 3.14})
+    r.addH(hSample{t: 2, h: h1})
+
+    it := r.iterator()
+
+    require.Equal(t, chunkenc.ValFloat, it.Next())
+    ts, f := it.At()
+    require.Equal(t, int64(1), ts)
+    require.Equal(t, 3.14, f)
+    require.Equal(t, chunkenc.ValHistogram, it.Next())
+    var h *histogram.Histogram
+    ts, h = it.AtHistogram()
+    require.Equal(t, int64(2), ts)
+    require.Equal(t, h1, h)
+    require.Equal(t, chunkenc.ValNone, it.Next())
+
+    r.reset()
+    it = r.iterator()
+    require.Equal(t, chunkenc.ValNone, it.Next())
+
+    r.addF(fSample{t: 3, f: 4.2})
+    r.addH(hSample{t: 4, h: h2})
+
+    it = r.iterator()
+
+    require.Equal(t, chunkenc.ValFloat, it.Next())
+    ts, f = it.At()
+    require.Equal(t, int64(3), ts)
+    require.Equal(t, 4.2, f)
+    require.Equal(t, chunkenc.ValHistogram, it.Next())
+    ts, h = it.AtHistogram()
+    require.Equal(t, int64(4), ts)
+    require.Equal(t, h2, h)
+    require.Equal(t, chunkenc.ValNone, it.Next())
+}
+
 func TestBufferedSeriesIterator(t *testing.T) {
     var it *BufferedSeriesIterator

@@ -211,7 +259,7 @@ func BenchmarkBufferedSeriesIterator(b *testing.B) {
     b.ReportAllocs()
     b.ResetTimer()

-    for it.Next() != chunkenc.ValNone { // nolint:revive
+    for it.Next() != chunkenc.ValNone {
         // Scan everything.
     }
     require.NoError(b, it.Err())
@@ -112,7 +112,7 @@ func BenchmarkMemoizedSeriesIterator(b *testing.B) {
     b.ReportAllocs()
     b.ResetTimer()

-    for it.Next() != chunkenc.ValNone { // nolint:revive
+    for it.Next() != chunkenc.ValNone {
         // Scan everything.
     }
     require.NoError(b, it.Err())
@@ -502,9 +502,9 @@ func (c *chainSampleIterator) Seek(t int64) chunkenc.ValueType {
             // If any iterator is reporting an error, abort.
             return chunkenc.ValNone
         }
-        } else {
-            heap.Push(&c.h, iter)
+            continue
         }
+        heap.Push(&c.h, iter)
     }
     if len(c.h) > 0 {
         c.curr = heap.Pop(&c.h).(chunkenc.Iterator)
@@ -599,7 +599,9 @@ func (c *chainSampleIterator) Next() chunkenc.ValueType {
     if c.curr.Err() != nil {
         // Abort if we've hit an error.
         return chunkenc.ValNone
-    } else if len(c.h) == 0 {
+    }
+
+    if len(c.h) == 0 {
         // No iterator left to iterate.
         c.curr = nil
         return chunkenc.ValNone
@@ -62,7 +62,7 @@ type OAuthConfig struct {
 }

 // AzureADConfig is used to store the config values.
-type AzureADConfig struct { // nolint:revive
+type AzureADConfig struct {
     // ManagedIdentity is the managed identity that is being used to authenticate.
     ManagedIdentity *ManagedIdentityConfig `yaml:"managed_identity,omitempty"`

@@ -136,7 +136,7 @@ func TestClientRetryAfter(t *testing.T) {
     err = c.Store(context.Background(), []byte{}, 0)
     require.Equal(t, tc.expectedRecoverable, errors.As(err, &recErr), "Mismatch in expected recoverable error status.")
     if tc.expectedRecoverable {
-        require.Equal(t, tc.expectedRetryAfter, err.(RecoverableError).retryAfter)
+        require.Equal(t, tc.expectedRetryAfter, recErr.retryAfter)
     }
     })
 }
@@ -15,6 +15,7 @@ package remote

 import (
     "context"
+    "errors"
     "net/http"
     "strings"
     "sync"
@@ -169,7 +170,8 @@ func (h *readHandler) remoteReadSamples(
         }
         return nil
     }(); err != nil {
-        if httpErr, ok := err.(HTTPError); ok {
+        var httpErr HTTPError
+        if errors.As(err, &httpErr) {
             http.Error(w, httpErr.Error(), httpErr.Status())
             return
         }
@@ -241,7 +243,8 @@ func (h *readHandler) remoteReadStreamedXORChunks(ctx context.Context, w http.Re
         }
         return nil
     }(); err != nil {
-        if httpErr, ok := err.(HTTPError); ok {
+        var httpErr HTTPError
+        if errors.As(err, &httpErr) {
             http.Error(w, httpErr.Error(), httpErr.Status())
             return
         }
@@ -66,9 +66,9 @@ func (h *writeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
     }

     err = h.write(r.Context(), req)
-    switch err {
-    case nil:
-    case storage.ErrOutOfOrderSample, storage.ErrOutOfBounds, storage.ErrDuplicateSampleForTimestamp:
+    switch {
+    case err == nil:
+    case errors.Is(err, storage.ErrOutOfOrderSample), errors.Is(err, storage.ErrOutOfBounds), errors.Is(err, storage.ErrDuplicateSampleForTimestamp):
         // Indicated an out of order sample is a bad request to prevent retries.
         http.Error(w, err.Error(), http.StatusBadRequest)
         return
@@ -222,9 +222,9 @@ func (h *otlpWriteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
         Timeseries: prwMetrics,
     })

-    switch err {
-    case nil:
-    case storage.ErrOutOfOrderSample, storage.ErrOutOfBounds, storage.ErrDuplicateSampleForTimestamp:
+    switch {
+    case err == nil:
+    case errors.Is(err, storage.ErrOutOfOrderSample), errors.Is(err, storage.ErrOutOfBounds), errors.Is(err, storage.ErrDuplicateSampleForTimestamp):
         // Indicated an out of order sample is a bad request to prevent retries.
         http.Error(w, err.Error(), http.StatusBadRequest)
         return
@@ -181,7 +181,7 @@ func NewTemplateExpander(
         return html_template.HTML(text)
     },
     "match": regexp.MatchString,
-    "title": strings.Title, // nolint:staticcheck
+    "title": strings.Title, //nolint:staticcheck
     "toUpper": strings.ToUpper,
     "toLower": strings.ToLower,
     "graphLink": strutil.GraphLinkForExpression,
@@ -103,7 +103,7 @@ func (c *FloatHistogramChunk) Appender() (Appender, error) {
     // To get an appender, we must know the state it would have if we had
     // appended all existing data from scratch. We iterate through the end
     // and populate via the iterator's state.
-    for it.Next() == ValFloatHistogram { // nolint:revive
+    for it.Next() == ValFloatHistogram {
     }
     if err := it.Err(); err != nil {
         return nil, err
@@ -157,7 +157,7 @@ func TestFloatHistogramChunkSameBuckets(t *testing.T) {

     // 3. Now recycle an iterator that was never used to access anything.
     itX := c.Iterator(nil)
-    for itX.Next() == ValFloatHistogram { // nolint:revive
+    for itX.Next() == ValFloatHistogram {
         // Just iterate through without accessing anything.
     }
     it3 := c.iterator(itX)
@@ -114,7 +114,7 @@ func (c *HistogramChunk) Appender() (Appender, error) {
     // To get an appender, we must know the state it would have if we had
     // appended all existing data from scratch. We iterate through the end
     // and populate via the iterator's state.
-    for it.Next() == ValHistogram { // nolint:revive
+    for it.Next() == ValHistogram {
     }
     if err := it.Err(); err != nil {
         return nil, err
@@ -162,7 +162,7 @@ func TestHistogramChunkSameBuckets(t *testing.T) {

     // 3. Now recycle an iterator that was never used to access anything.
     itX := c.Iterator(nil)
-    for itX.Next() == ValHistogram { // nolint:revive
+    for itX.Next() == ValHistogram {
         // Just iterate through without accessing anything.
     }
     it3 := c.iterator(itX)
@@ -99,7 +99,7 @@ func (c *XORChunk) Appender() (Appender, error) {
     // To get an appender we must know the state it would have if we had
     // appended all existing data from scratch.
     // We iterate through the end and populate via the iterator's state.
-    for it.Next() != ValNone { // nolint:revive
+    for it.Next() != ValNone {
     }
     if err := it.Err(); err != nil {
         return nil, err
@@ -1953,7 +1953,8 @@ func TestQuerierWithBoundaryChunks(t *testing.T) {
     // The requested interval covers 2 blocks, so the querier's label values for blockID should give us 2 values, one from each block.
     b, ws, err := q.LabelValues(ctx, "blockID")
     require.NoError(t, err)
-    require.Equal(t, annotations.Annotations{}, ws)
+    var nilAnnotations annotations.Annotations
+    require.Equal(t, nilAnnotations, ws)
     require.Equal(t, []string{"1", "2"}, b)
 }

@@ -25,7 +25,7 @@ import (
 type multiError []error

 // NewMulti returns multiError with provided errors added if not nil.
-func NewMulti(errs ...error) multiError { // nolint:revive
+func NewMulti(errs ...error) multiError {
     m := multiError{}
     m.Add(errs...)
     return m
@ -11,7 +11,6 @@
|
||||||
// See the License for the specific language governing permissions and
|
// See the License for the specific language governing permissions and
|
||||||
// limitations under the License.
|
// limitations under the License.
|
||||||
|
|
||||||
// nolint:revive // Many legitimately empty blocks in this file.
|
|
||||||
package tsdb
|
package tsdb
|
||||||
|
|
||||||
import (
|
import (
|
||||||

@@ -11,7 +11,6 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-// nolint:revive // Many legitimately empty blocks in this file.
 package tsdb

 import (

@@ -11,7 +11,6 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-// nolint:revive // Many unused function arguments in this file by design.
 package tsdb

 import (

@@ -287,7 +287,7 @@ func BenchmarkQuerierSelect(b *testing.B) {
 	}

 	ss := q.Select(context.Background(), sorted, hints, matcher)
-	for ss.Next() { // nolint:revive
+	for ss.Next() {
 	}
 	require.NoError(b, ss.Err())
 }

@@ -11,7 +11,6 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-// nolint:revive // Many unsued function arguments in this file by design.
 package tsdb

 import (

@@ -11,7 +11,6 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-// nolint:revive // Many unsued function arguments in this file by design.
 package tsdb

 import (

@@ -541,7 +541,7 @@ func TestReaderData(t *testing.T) {
 			require.NoError(t, err)

 			reader := fn(sr)
-			for reader.Next() { // nolint:revive
+			for reader.Next() {
 			}
 			require.NoError(t, reader.Err())


@@ -971,7 +971,6 @@ type segmentBufReader struct {
 	off  int // Offset of read data into current segment.
 }

-// nolint:revive // TODO: Consider exporting segmentBufReader
 func NewSegmentBufReader(segs ...*Segment) *segmentBufReader {
 	if len(segs) == 0 {
 		return &segmentBufReader{}
@@ -983,7 +982,6 @@ func NewSegmentBufReader(segs ...*Segment) *segmentBufReader {
 	}
 }

-// nolint:revive
 func NewSegmentBufReaderWithOffset(offset int, segs ...*Segment) (sbr *segmentBufReader, err error) {
 	if offset == 0 || len(segs) == 0 {
 		return NewSegmentBufReader(segs...), nil

@@ -163,7 +163,7 @@ func TestWALRepair_ReadingError(t *testing.T) {
 			sr := NewSegmentBufReader(s)
 			require.NoError(t, err)
 			r := NewReader(sr)
-			for r.Next() { // nolint:revive
+			for r.Next() {
 			}

 			// Close the segment so we don't break things on Windows.

@@ -50,6 +50,9 @@ func (a *Annotations) Add(err error) Annotations {
 // the first in-place, and returns the merged first Annotation for convenience.
 func (a *Annotations) Merge(aa Annotations) Annotations {
 	if *a == nil {
+		if aa == nil {
+			return nil
+		}
 		*a = Annotations{}
 	}
 	for key, val := range aa {
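
The hunk above makes `Merge` preserve nil-ness: merging a nil `Annotations` into a nil receiver now returns nil instead of an empty map, which is what lets the TSDB test earlier in this diff compare against an unset `annotations.Annotations` value. A minimal sketch, assuming the map-of-errors shape of the type:

```go
package main

import "fmt"

// Annotations is assumed to be a map keyed by annotation text, as in
// Prometheus' util/annotations package.
type Annotations map[string]error

func (a *Annotations) Merge(aa Annotations) Annotations {
	if *a == nil {
		if aa == nil {
			return nil // nil in, nil out: do not materialize an empty map
		}
		*a = Annotations{}
	}
	for key, val := range aa {
		(*a)[key] = val
	}
	return *a
}

func main() {
	var a Annotations
	fmt.Println(a.Merge(nil) == nil) // true
}
```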
@@ -91,7 +94,6 @@ func (a Annotations) AsStrings(query string, maxAnnos int) []string {
 	return arr
 }

-//nolint:revive // Ignore ST1012
 var (
 	// Currently there are only 2 types, warnings and info.
 	// For now, info are visually identical with warnings as we have not updated
@@ -122,6 +124,10 @@ func (e annoErr) Error() string {
 	return fmt.Sprintf("%s (%s)", e.Err, e.PositionRange.StartPosInput(e.Query, 0))
 }

+func (e annoErr) Unwrap() error {
+	return e.Err
+}
+
 // NewInvalidQuantileWarning is used when the user specifies an invalid quantile
 // value, i.e. a float that is outside the range [0, 1] or NaN.
 func NewInvalidQuantileWarning(q float64, pos posrange.PositionRange) annoErr {
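
With `Unwrap` defined, the standard `errors.Is` and `errors.As` helpers can see through the annotation wrapper to the underlying error. A self-contained sketch with stand-in types (not the real `annotations` package):

```go
package main

import (
	"errors"
	"fmt"
)

var ErrBadQuantile = errors.New("quantile out of range")

// annoErr mimics the wrapper in the hunk above: it decorates an error
// with query context while keeping the original error reachable.
type annoErr struct {
	Query string
	Err   error
}

func (e annoErr) Error() string { return fmt.Sprintf("%s (query %q)", e.Err, e.Query) }

func (e annoErr) Unwrap() error { return e.Err }

func main() {
	err := annoErr{Query: "histogram_quantile(2, x)", Err: ErrBadQuantile}
	fmt.Println(errors.Is(err, ErrBadQuantile)) // true, thanks to Unwrap
}
```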

@@ -116,7 +116,7 @@ func (tc *ZookeeperTreeCache) Stop() {
 	tc.stop <- struct{}{}
 	go func() {
 		// Drain tc.head.events so that go routines can make progress and exit.
-		for range tc.head.events { // nolint:revive
+		for range tc.head.events {
 		}
 	}()
 	go func() {

@@ -149,13 +149,13 @@ func BenchmarkSyncPoolNewPointer(b *testing.B) {

 	// Warmup
 	item := pool.Get().(*[]byte)
-	pool.Put(item) //nolint:staticcheck // This allocates.
+	pool.Put(item)

 	b.ResetTimer()
 	for i := 0; i < b.N; i++ {
 		item := pool.Get().(*[]byte)
 		buf := *item
-		pool.Put(&buf) //nolint:staticcheck // New pointer.
+		pool.Put(&buf)
 	}
 }

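
The suppressions dropped above guarded staticcheck's SA6002 rule: putting a slice value into a `sync.Pool` allocates, because converting the slice header to `interface{}` copies it to the heap, whereas storing a `*[]byte` does not. A small stand-alone benchmark sketch of the pointer variant (names are illustrative, not from this diff):

```go
package main

import (
	"sync"
	"testing"
)

func BenchmarkPutPointer(b *testing.B) {
	pool := sync.Pool{New: func() interface{} {
		buf := make([]byte, 1024)
		return &buf // store a pointer so Put/Get never copy the header
	}}
	b.ReportAllocs()
	for i := 0; i < b.N; i++ {
		item := pool.Get().(*[]byte)
		pool.Put(item) // a pointer fits in the interface without allocating
	}
}
```

Run with `go test -bench=PutPointer -benchmem` to see zero allocations per op under this scheme.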

@@ -1373,6 +1373,11 @@ func (api *API) rules(r *http.Request) apiFuncResult {
 	returnAlerts := typ == "" || typ == "alert"
 	returnRecording := typ == "" || typ == "record"

+	excludeAlerts, err := parseExcludeAlerts(r)
+	if err != nil {
+		return invalidParamError(err, "exclude_alerts")
+	}
+
 	rgs := make([]*RuleGroup, 0, len(ruleGroups))
 	for _, grp := range ruleGroups {
 		if len(rgSet) > 0 {
@@ -1414,6 +1419,10 @@ func (api *API) rules(r *http.Request) apiFuncResult {
 			if !returnAlerts {
 				break
 			}
+			var activeAlerts []*Alert
+			if !excludeAlerts {
+				activeAlerts = rulesAlertsToAPIAlerts(rule.ActiveAlerts())
+			}
 			enrichedRule = AlertingRule{
 				State: rule.State().String(),
 				Name:  rule.Name(),
@@ -1422,7 +1431,7 @@ func (api *API) rules(r *http.Request) apiFuncResult {
 				KeepFiringFor:  rule.KeepFiringFor().Seconds(),
 				Labels:         rule.Labels(),
 				Annotations:    rule.Annotations(),
-				Alerts:         rulesAlertsToAPIAlerts(rule.ActiveAlerts()),
+				Alerts:         activeAlerts,
 				Health:         rule.Health(),
 				LastError:      lastError,
 				EvaluationTime: rule.GetEvaluationDuration().Seconds(),
@@ -1462,6 +1471,20 @@ func (api *API) rules(r *http.Request) apiFuncResult {
 	return apiFuncResult{res, nil, nil, nil}
 }

+func parseExcludeAlerts(r *http.Request) (bool, error) {
+	excludeAlertsParam := strings.ToLower(r.URL.Query().Get("exclude_alerts"))
+
+	if excludeAlertsParam == "" {
+		return false, nil
+	}
+
+	excludeAlerts, err := strconv.ParseBool(excludeAlertsParam)
+	if err != nil {
+		return false, fmt.Errorf("error converting exclude_alerts: %w", err)
+	}
+	return excludeAlerts, nil
+}
+
 type prometheusConfig struct {
 	YAML string `json:"yaml"`
 }
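
`parseExcludeAlerts` accepts anything `strconv.ParseBool` does ("true", "1", "T", ...) and treats an absent or empty parameter as false. A hedged usage sketch of the new parameter against a local server; the URL and port are illustrative defaults, not taken from this diff:

```go
package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Ask the rules endpoint for alerting rules without their active alerts.
	resp, err := http.Get("http://localhost:9090/api/v1/rules?type=alert&exclude_alerts=true")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(body)) // alerting rules; each "alerts" field is omitted
}
```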

@@ -209,10 +209,12 @@ func (t testAlertmanagerRetriever) toFactory() func(context.Context) Alertmanage
 }

 type rulesRetrieverMock struct {
-	testing *testing.T
+	alertingRules []*rules.AlertingRule
+	ruleGroups    []*rules.Group
+	testing       *testing.T
 }

-func (m rulesRetrieverMock) AlertingRules() []*rules.AlertingRule {
+func (m *rulesRetrieverMock) CreateAlertingRules() {
 	expr1, err := parser.ParseExpr(`absent(test_metric3) != 1`)
 	if err != nil {
 		m.testing.Fatalf("unable to parse alert expression: %s", err)
@@ -222,6 +224,11 @@ func (m rulesRetrieverMock) AlertingRules() []*rules.AlertingRule {
 		m.testing.Fatalf("Unable to parse alert expression: %s", err)
 	}

+	expr3, err := parser.ParseExpr(`vector(1)`)
+	if err != nil {
+		m.testing.Fatalf("Unable to parse alert expression: %s", err)
+	}
+
 	rule1 := rules.NewAlertingRule(
 		"test_metric3",
 		expr1,
@@ -246,15 +253,29 @@ func (m rulesRetrieverMock) AlertingRules() []*rules.AlertingRule {
 		true,
 		log.NewNopLogger(),
 	)
+	rule3 := rules.NewAlertingRule(
+		"test_metric5",
+		expr3,
+		time.Second,
+		0,
+		labels.FromStrings("name", "tm5"),
+		labels.Labels{},
+		labels.FromStrings("name", "tm5"),
+		"",
+		false,
+		log.NewNopLogger(),
+	)

 	var r []*rules.AlertingRule
 	r = append(r, rule1)
 	r = append(r, rule2)
-	return r
+	r = append(r, rule3)
+	m.alertingRules = r
 }

-func (m rulesRetrieverMock) RuleGroups() []*rules.Group {
-	var ar rulesRetrieverMock
-	arules := ar.AlertingRules()
+func (m *rulesRetrieverMock) CreateRuleGroups() {
+	m.CreateAlertingRules()
+	arules := m.AlertingRules()
 	storage := teststorage.New(m.testing)
 	defer storage.Close()

@@ -271,6 +292,7 @@ func (m rulesRetrieverMock) RuleGroups() []*rules.Group {
 		Appendable: storage,
 		Context:    context.Background(),
 		Logger:     log.NewNopLogger(),
+		NotifyFunc: func(ctx context.Context, expr string, alerts ...*rules.Alert) {},
 	}

 	var r []rules.Rule
@@ -294,10 +316,18 @@ func (m rulesRetrieverMock) RuleGroups() []*rules.Group {
 		ShouldRestore: false,
 		Opts:          opts,
 	})
-	return []*rules.Group{group}
+	m.ruleGroups = []*rules.Group{group}
 }

-func (m rulesRetrieverMock) toFactory() func(context.Context) RulesRetriever {
+func (m *rulesRetrieverMock) AlertingRules() []*rules.AlertingRule {
+	return m.alertingRules
+}
+
+func (m *rulesRetrieverMock) RuleGroups() []*rules.Group {
+	return m.ruleGroups
+}
+
+func (m *rulesRetrieverMock) toFactory() func(context.Context) RulesRetriever {
 	return func(context.Context) RulesRetriever { return m }
 }

@@ -380,12 +410,14 @@ func TestEndpoints(t *testing.T) {
 	now := time.Now()

 	t.Run("local", func(t *testing.T) {
-		var algr rulesRetrieverMock
+		algr := rulesRetrieverMock{}
 		algr.testing = t

-		algr.AlertingRules()
+		algr.CreateAlertingRules()
+		algr.CreateRuleGroups()

-		algr.RuleGroups()
+		g := algr.RuleGroups()
+		g[0].Eval(context.Background(), time.Now())

 		testTargetRetriever := setupTestTargetRetriever(t)

@@ -442,12 +474,14 @@ func TestEndpoints(t *testing.T) {
 		})
 		require.NoError(t, err)

-		var algr rulesRetrieverMock
+		algr := rulesRetrieverMock{}
 		algr.testing = t

-		algr.AlertingRules()
+		algr.CreateAlertingRules()
+		algr.CreateRuleGroups()

-		algr.RuleGroups()
+		g := algr.RuleGroups()
+		g[0].Eval(context.Background(), time.Now())

 		testTargetRetriever := setupTestTargetRetriever(t)

@@ -1036,6 +1070,36 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E
 		sorter    func(interface{})
 		metadata  []targetMetadata
 		exemplars []exemplar.QueryResult
+		zeroFunc  func(interface{})
+	}
+
+	rulesZeroFunc := func(i interface{}) {
+		if i != nil {
+			v := i.(*RuleDiscovery)
+			for _, ruleGroup := range v.RuleGroups {
+				ruleGroup.EvaluationTime = float64(0)
+				ruleGroup.LastEvaluation = time.Time{}
+				for k, rule := range ruleGroup.Rules {
+					switch r := rule.(type) {
+					case AlertingRule:
+						r.LastEvaluation = time.Time{}
+						r.EvaluationTime = float64(0)
+						r.LastError = ""
+						r.Health = "ok"
+						for _, alert := range r.Alerts {
+							alert.ActiveAt = nil
+						}
+						ruleGroup.Rules[k] = r
+					case RecordingRule:
+						r.LastEvaluation = time.Time{}
+						r.EvaluationTime = float64(0)
+						r.LastError = ""
+						r.Health = "ok"
+						ruleGroup.Rules[k] = r
+					}
+				}
+			}
+		}
 	}

 	tests := []test{
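
`rulesZeroFunc` above zeroes the non-deterministic fields of a rules API response (evaluation timestamps, durations, health and error state) before the fixture comparison, now that rule groups are actually evaluated in the test setup. A minimal stand-alone sketch of that test pattern with stand-in types:

```go
package main

import (
	"fmt"
	"time"
)

// rule is a hypothetical response type with one volatile field per kind:
// stable identity (Name) plus evaluation metadata that changes per run.
type rule struct {
	Name           string
	LastEvaluation time.Time
	EvaluationTime float64
}

// zeroRule blanks the fields that cannot be predicted by a fixture.
func zeroRule(r *rule) {
	r.LastEvaluation = time.Time{}
	r.EvaluationTime = 0
}

func main() {
	got := rule{Name: "up", LastEvaluation: time.Now(), EvaluationTime: 0.01}
	want := rule{Name: "up"}
	zeroRule(&got)
	fmt.Println(got == want) // true once volatile fields are zeroed
}
```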
@@ -1988,7 +2052,22 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E
 		{
 			endpoint: api.alerts,
 			response: &AlertDiscovery{
-				Alerts: []*Alert{},
+				Alerts: []*Alert{
+					{
+						Labels:      labels.FromStrings("alertname", "test_metric5", "name", "tm5"),
+						Annotations: labels.Labels{},
+						State:       "pending",
+						Value:       "1e+00",
+					},
+				},
+			},
+			zeroFunc: func(i interface{}) {
+				if i != nil {
+					v := i.(*AlertDiscovery)
+					for _, alert := range v.Alerts {
+						alert.ActiveAt = nil
+					}
+				}
 			},
 		},
 		{
@@ -2009,7 +2088,7 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E
 						Labels:      labels.Labels{},
 						Annotations: labels.Labels{},
 						Alerts:      []*Alert{},
-						Health:      "unknown",
+						Health:      "ok",
 						Type:        "alerting",
 					},
 					AlertingRule{
@@ -2020,20 +2099,98 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E
 						Labels:      labels.Labels{},
 						Annotations: labels.Labels{},
 						Alerts:      []*Alert{},
-						Health:      "unknown",
+						Health:      "ok",
 						Type:        "alerting",
 					},
+					AlertingRule{
+						State:       "pending",
+						Name:        "test_metric5",
+						Query:       "vector(1)",
+						Duration:    1,
+						Labels:      labels.FromStrings("name", "tm5"),
+						Annotations: labels.Labels{},
+						Alerts: []*Alert{
+							{
+								Labels:      labels.FromStrings("alertname", "test_metric5", "name", "tm5"),
+								Annotations: labels.Labels{},
+								State:       "pending",
+								Value:       "1e+00",
+							},
+						},
+						Health: "ok",
+						Type:   "alerting",
+					},
 					RecordingRule{
 						Name:   "recording-rule-1",
 						Query:  "vector(1)",
 						Labels: labels.Labels{},
-						Health: "unknown",
+						Health: "ok",
 						Type:   "recording",
 					},
 				},
 			},
 		},
 	},
+			zeroFunc: rulesZeroFunc,
+		},
+		{
+			endpoint: api.rules,
+			query: url.Values{
+				"exclude_alerts": []string{"true"},
+			},
+			response: &RuleDiscovery{
+				RuleGroups: []*RuleGroup{
+					{
+						Name:     "grp",
+						File:     "/path/to/file",
+						Interval: 1,
+						Limit:    0,
+						Rules: []Rule{
+							AlertingRule{
+								State:       "inactive",
+								Name:        "test_metric3",
+								Query:       "absent(test_metric3) != 1",
+								Duration:    1,
+								Labels:      labels.Labels{},
+								Annotations: labels.Labels{},
+								Alerts:      nil,
+								Health:      "ok",
+								Type:        "alerting",
+							},
+							AlertingRule{
+								State:       "inactive",
+								Name:        "test_metric4",
+								Query:       "up == 1",
+								Duration:    1,
+								Labels:      labels.Labels{},
+								Annotations: labels.Labels{},
+								Alerts:      nil,
+								Health:      "ok",
+								Type:        "alerting",
+							},
+							AlertingRule{
+								State:       "pending",
+								Name:        "test_metric5",
+								Query:       "vector(1)",
+								Duration:    1,
+								Labels:      labels.FromStrings("name", "tm5"),
+								Annotations: labels.Labels{},
+								Alerts:      nil,
+								Health:      "ok",
+								Type:        "alerting",
+							},
+							RecordingRule{
+								Name:   "recording-rule-1",
+								Query:  "vector(1)",
+								Labels: labels.Labels{},
+								Health: "ok",
+								Type:   "recording",
+							},
+						},
+					},
+				},
+			},
+			zeroFunc: rulesZeroFunc,
 		},
 		{
 			endpoint: api.rules,
@@ -2056,7 +2213,7 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E
 						Labels:      labels.Labels{},
 						Annotations: labels.Labels{},
 						Alerts:      []*Alert{},
-						Health:      "unknown",
+						Health:      "ok",
 						Type:        "alerting",
 					},
 					AlertingRule{
@@ -2067,13 +2224,32 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E
 						Labels:      labels.Labels{},
 						Annotations: labels.Labels{},
 						Alerts:      []*Alert{},
-						Health:      "unknown",
+						Health:      "ok",
 						Type:        "alerting",
 					},
+					AlertingRule{
+						State:       "pending",
+						Name:        "test_metric5",
+						Query:       "vector(1)",
+						Duration:    1,
+						Labels:      labels.FromStrings("name", "tm5"),
+						Annotations: labels.Labels{},
+						Alerts: []*Alert{
+							{
+								Labels:      labels.FromStrings("alertname", "test_metric5", "name", "tm5"),
+								Annotations: labels.Labels{},
+								State:       "pending",
+								Value:       "1e+00",
+							},
+						},
+						Health: "ok",
+						Type:   "alerting",
+					},
 				},
 			},
 		},
 	},
+			zeroFunc: rulesZeroFunc,
 		},
 		{
 			endpoint: api.rules,
@@ -2092,13 +2268,14 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E
 						Name:   "recording-rule-1",
 						Query:  "vector(1)",
 						Labels: labels.Labels{},
-						Health: "unknown",
+						Health: "ok",
 						Type:   "recording",
 					},
 				},
 			},
 		},
 	},
+			zeroFunc: rulesZeroFunc,
 		},
 		{
 			endpoint: api.rules,
@@ -2119,13 +2296,14 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E
 						Labels:      labels.Labels{},
 						Annotations: labels.Labels{},
 						Alerts:      []*Alert{},
-						Health:      "unknown",
+						Health:      "ok",
 						Type:        "alerting",
 					},
 				},
 			},
 		},
 	},
+			zeroFunc: rulesZeroFunc,
 		},
 		{
 			endpoint: api.rules,
@@ -2151,13 +2329,14 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E
 						Labels:      labels.Labels{},
 						Annotations: labels.Labels{},
 						Alerts:      []*Alert{},
-						Health:      "unknown",
+						Health:      "ok",
 						Type:        "alerting",
 					},
 				},
 			},
 		},
 	},
+			zeroFunc: rulesZeroFunc,
 		},
 		{
 			endpoint: api.queryExemplars,
@@ -2696,6 +2875,9 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E
 					assertAPIResponseMetadataLen(t, res.data, test.responseMetadataTotal)
 				}
 			} else {
+				if test.zeroFunc != nil {
+					test.zeroFunc(res.data)
+				}
 				assertAPIResponse(t, res.data, test.response)
 			}
 		})

@@ -11,7 +11,6 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-// nolint:revive // Many unsued function arguments in this file by design.
 package v1

 import (

@@ -1,6 +1,6 @@
 {
   "name": "@prometheus-io/codemirror-promql",
-  "version": "0.47.0",
+  "version": "0.48.0-rc.1",
   "description": "a CodeMirror mode for the PromQL language",
   "types": "dist/esm/index.d.ts",
   "module": "dist/esm/index.js",
@@ -29,7 +29,7 @@
   },
   "homepage": "https://github.com/prometheus/prometheus/blob/main/web/ui/module/codemirror-promql/README.md",
   "dependencies": {
-    "@prometheus-io/lezer-promql": "0.47.0",
+    "@prometheus-io/lezer-promql": "0.48.0-rc.1",
     "lru-cache": "^7.18.3"
   },
   "devDependencies": {

@@ -1,6 +1,6 @@
 {
   "name": "@prometheus-io/lezer-promql",
-  "version": "0.47.0",
+  "version": "0.48.0-rc.1",
   "description": "lezer-based PromQL grammar",
   "main": "dist/index.cjs",
   "type": "module",

web/ui/package-lock.json (generated, 18 lines changed)

@@ -1,12 +1,12 @@
 {
   "name": "prometheus-io",
-  "version": "0.46.0",
+  "version": "0.48.0-rc.1",
   "lockfileVersion": 2,
   "requires": true,
   "packages": {
     "": {
       "name": "prometheus-io",
-      "version": "0.46.0",
+      "version": "0.48.0-rc.1",
       "workspaces": [
         "react-app",
         "module/*"
@@ -30,10 +30,10 @@
     },
     "module/codemirror-promql": {
       "name": "@prometheus-io/codemirror-promql",
-      "version": "0.47.0",
+      "version": "0.48.0-rc.1",
       "license": "Apache-2.0",
       "dependencies": {
-        "@prometheus-io/lezer-promql": "0.47.0",
+        "@prometheus-io/lezer-promql": "0.48.0-rc.1",
         "lru-cache": "^7.18.3"
       },
       "devDependencies": {
@@ -70,7 +70,7 @@
     },
     "module/lezer-promql": {
       "name": "@prometheus-io/lezer-promql",
-      "version": "0.47.0",
+      "version": "0.48.0-rc.1",
       "license": "Apache-2.0",
       "devDependencies": {
         "@lezer/generator": "^1.2.3",
@@ -20764,7 +20764,7 @@
     },
     "react-app": {
       "name": "@prometheus-io/app",
-      "version": "0.47.0",
+      "version": "0.48.0-rc.1",
       "dependencies": {
         "@codemirror/autocomplete": "^6.7.1",
         "@codemirror/commands": "^6.2.4",
@@ -20782,7 +20782,7 @@
         "@lezer/lr": "^1.3.6",
         "@nexucis/fuzzy": "^0.4.1",
         "@nexucis/kvsearch": "^0.8.1",
-        "@prometheus-io/codemirror-promql": "0.47.0",
+        "@prometheus-io/codemirror-promql": "0.48.0-rc.1",
         "bootstrap": "^4.6.2",
         "css.escape": "^1.5.1",
         "downshift": "^7.6.0",
@@ -23422,7 +23422,7 @@
         "@lezer/lr": "^1.3.6",
         "@nexucis/fuzzy": "^0.4.1",
         "@nexucis/kvsearch": "^0.8.1",
-        "@prometheus-io/codemirror-promql": "0.47.0",
+        "@prometheus-io/codemirror-promql": "0.48.0-rc.1",
         "@testing-library/react-hooks": "^7.0.2",
         "@types/enzyme": "^3.10.13",
         "@types/flot": "0.0.32",
@@ -23486,7 +23486,7 @@
         "@lezer/common": "^1.0.3",
         "@lezer/highlight": "^1.1.6",
         "@lezer/lr": "^1.3.6",
-        "@prometheus-io/lezer-promql": "0.47.0",
+        "@prometheus-io/lezer-promql": "0.48.0-rc.1",
         "isomorphic-fetch": "^3.0.0",
         "lru-cache": "^7.18.3",
         "nock": "^13.3.1"

@@ -28,5 +28,5 @@
     "ts-jest": "^29.1.0",
     "typescript": "^4.9.5"
   },
-  "version": "0.46.0"
+  "version": "0.48.0-rc.1"
 }

@@ -1,6 +1,6 @@
 {
   "name": "@prometheus-io/app",
-  "version": "0.47.0",
+  "version": "0.48.0-rc.1",
   "private": true,
   "dependencies": {
     "@codemirror/autocomplete": "^6.7.1",
@@ -19,7 +19,7 @@
     "@lezer/lr": "^1.3.6",
     "@nexucis/fuzzy": "^0.4.1",
     "@nexucis/kvsearch": "^0.8.1",
-    "@prometheus-io/codemirror-promql": "0.47.0",
+    "@prometheus-io/codemirror-promql": "0.48.0-rc.1",
     "bootstrap": "^4.6.2",
     "css.escape": "^1.5.1",
     "downshift": "^7.6.0",