mirror of https://github.com/prometheus/prometheus.git
synced 2025-03-05 20:59:13 -08:00

Merge pull request #12241 from mmorel-35/linter/nilerr

enable gocritic, unconvert and unused linters

Commit e2512078e5

.github/workflows/ci.yml (vendored)
@@ -149,6 +149,8 @@ jobs:
       - name: Lint
         uses: golangci/golangci-lint-action@v3.4.0
         with:
+          args: --verbose
+          skip-cache: true
          version: v1.51.2
   fuzzing:
     uses: ./.github/workflows/fuzzing.yml
.golangci.yml
@@ -1,5 +1,5 @@
 run:
-  deadline: 5m
+  timeout: 15m
   skip-files:
     # Skip autogenerated files.
     - ^.*\.(pb|y)\.go$

@@ -10,14 +10,23 @@ output:
 linters:
   enable:
     - depguard
+    - gocritic
     - gofumpt
     - goimports
     - revive
     - misspell
+    - unconvert
+    - unused

 issues:
   max-same-issues: 0
   exclude-rules:
+    - linters:
+        - gocritic
+      text: "appendAssign"
+    - linters:
+        - gocritic
+      text: "singleCaseSwitch"
     - path: _test.go
       linters:
         - errcheck
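Note: the two exclude-rules entries above silence individual gocritic checks project-wide. As a rough, hypothetical illustration (not code from this repository), these are the shapes each excluded check reports:

package main

import "fmt"

func main() {
	a := []int{1}
	var b []int
	// appendAssign: the result of append(a, ...) is assigned to b, not a.
	// This is often intentional, which is why the check is excluded here.
	b = append(a, 2)

	var v interface{} = "hello"
	// singleCaseSwitch: a one-case type switch reads better as an if.
	switch s := v.(type) {
	case string:
		fmt.Println(s, b)
	}
}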
@@ -491,7 +491,7 @@ func main() {
 		if cfgFile.StorageConfig.ExemplarsConfig == nil {
 			cfgFile.StorageConfig.ExemplarsConfig = &config.DefaultExemplarsConfig
 		}
-		cfg.tsdb.MaxExemplars = int64(cfgFile.StorageConfig.ExemplarsConfig.MaxExemplars)
+		cfg.tsdb.MaxExemplars = cfgFile.StorageConfig.ExemplarsConfig.MaxExemplars
 	}
 	if cfgFile.StorageConfig.TSDBConfig != nil {
 		cfg.tsdb.OutOfOrderTimeWindow = cfgFile.StorageConfig.TSDBConfig.OutOfOrderTimeWindow

@@ -193,7 +193,7 @@ func (p *queryLogTest) String() string {
 	}
 	name = name + ", " + p.host + ":" + strconv.Itoa(p.port)
 	if p.enabledAtStart {
-		name = name + ", enabled at start"
+		name += ", enabled at start"
 	}
 	if p.prefix != "" {
 		name = name + ", with prefix " + p.prefix

@@ -101,7 +101,7 @@ func createBlocks(input []byte, mint, maxt, maxBlockDuration int64, maxSamplesIn
 		nextSampleTs int64 = math.MaxInt64
 	)

-	for t := mint; t <= maxt; t = t + blockDuration {
+	for t := mint; t <= maxt; t += blockDuration {
 		tsUpper := t + blockDuration
 		if nextSampleTs != math.MaxInt64 && nextSampleTs >= tsUpper {
 			// The next sample is not in this timerange, we can avoid parsing

@@ -100,7 +100,7 @@ func (importer *ruleImporter) importRule(ctx context.Context, ruleExpr, ruleName
 	startInMs := start.Unix() * int64(time.Second/time.Millisecond)
 	endInMs := end.Unix() * int64(time.Second/time.Millisecond)

-	for startOfBlock := blockDuration * (startInMs / blockDuration); startOfBlock <= endInMs; startOfBlock = startOfBlock + blockDuration {
+	for startOfBlock := blockDuration * (startInMs / blockDuration); startOfBlock <= endInMs; startOfBlock += blockDuration {
 		endOfBlock := startOfBlock + blockDuration - 1

 		currStart := max(startOfBlock/int64(time.Second/time.Millisecond), start.Unix())
@@ -130,7 +130,7 @@ func resolveAndGlobFilepaths(baseDir string, utf *unitTestFile) error {
 		if err != nil {
 			return err
 		}
-		if len(m) <= 0 {
+		if len(m) == 0 {
 			fmt.Fprintln(os.Stderr, "  WARNING: no file match pattern", rf)
 		}
 		globbedFiles = append(globbedFiles, m...)

@@ -300,7 +300,7 @@ func (e *EndpointSlice) buildEndpointSlice(eps endpointSliceAdaptor) *targetgrou
 		}

 		if port.protocol() != nil {
-			target[endpointSlicePortProtocolLabel] = lv(string(*port.protocol()))
+			target[endpointSlicePortProtocolLabel] = lv(*port.protocol())
 		}

 		if port.port() != nil {

@@ -254,7 +254,7 @@ func replaceYAMLTypeError(err error, oldTyp, newTyp reflect.Type) error {
 		oldStr := oldTyp.String()
 		newStr := newTyp.String()
 		for i, s := range e.Errors {
-			e.Errors[i] = strings.Replace(s, oldStr, newStr, -1)
+			e.Errors[i] = strings.ReplaceAll(s, oldStr, newStr)
 		}
 	}
 	return err
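Note: many hunks in this commit apply the same gocritic suggestion: strings.Replace with a count of -1 is exactly strings.ReplaceAll. A quick sketch of the equivalence:

package main

import (
	"fmt"
	"strings"
)

func main() {
	s := "a-b-c"
	// A replacement count of -1 means "replace every occurrence",
	// so these two calls are equivalent; ReplaceAll states the intent.
	fmt.Println(strings.Replace(s, "-", "+", -1)) // a+b+c
	fmt.Println(strings.ReplaceAll(s, "-", "+"))  // a+b+c
}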
@@ -249,7 +249,6 @@ func (d *Discovery) refreshData(ctx context.Context) ([]*targetgroup.Group, erro
 			if detailedIP.Address != ip.String() {
 				continue
 			}

 			switch {
 			case detailedIP.Public && publicIPv4 == "":
 				publicIPv4 = detailedIP.Address

@@ -84,7 +84,7 @@ func MockDedicatedAPI(w http.ResponseWriter, r *http.Request) {
 		return
 	}
 	w.Header().Set("Content-Type", "application/json")
-	if string(r.URL.Path) == "/dedicated/server" {
+	if r.URL.Path == "/dedicated/server" {
 		dedicatedServersList, err := os.ReadFile("testdata/dedicated_server/dedicated_servers.json")
 		if err != nil {
 			http.Error(w, err.Error(), http.StatusInternalServerError)

@@ -96,7 +96,7 @@ func MockDedicatedAPI(w http.ResponseWriter, r *http.Request) {
 			return
 		}
 	}
-	if string(r.URL.Path) == "/dedicated/server/abcde" {
+	if r.URL.Path == "/dedicated/server/abcde" {
 		dedicatedServer, err := os.ReadFile("testdata/dedicated_server/dedicated_servers_details.json")
 		if err != nil {
 			http.Error(w, err.Error(), http.StatusInternalServerError)

@@ -108,7 +108,7 @@ func MockDedicatedAPI(w http.ResponseWriter, r *http.Request) {
 			return
 		}
 	}
-	if string(r.URL.Path) == "/dedicated/server/abcde/ips" {
+	if r.URL.Path == "/dedicated/server/abcde/ips" {
 		dedicatedServerIPs, err := os.ReadFile("testdata/dedicated_server/dedicated_servers_abcde_ips.json")
 		if err != nil {
 			http.Error(w, err.Error(), http.StatusInternalServerError)
@@ -91,7 +91,7 @@ func MockVpsAPI(w http.ResponseWriter, r *http.Request) {
 		return
 	}
 	w.Header().Set("Content-Type", "application/json")
-	if string(r.URL.Path) == "/vps" {
+	if r.URL.Path == "/vps" {
 		dedicatedServersList, err := os.ReadFile("testdata/vps/vps.json")
 		if err != nil {
 			http.Error(w, err.Error(), http.StatusInternalServerError)

@@ -103,7 +103,7 @@ func MockVpsAPI(w http.ResponseWriter, r *http.Request) {
 			return
 		}
 	}
-	if string(r.URL.Path) == "/vps/abc" {
+	if r.URL.Path == "/vps/abc" {
 		dedicatedServer, err := os.ReadFile("testdata/vps/vps_details.json")
 		if err != nil {
 			http.Error(w, err.Error(), http.StatusInternalServerError)

@@ -115,7 +115,7 @@ func MockVpsAPI(w http.ResponseWriter, r *http.Request) {
 			return
 		}
 	}
-	if string(r.URL.Path) == "/vps/abc/ips" {
+	if r.URL.Path == "/vps/abc/ips" {
 		dedicatedServerIPs, err := os.ReadFile("testdata/vps/vps_abc_ips.json")
 		if err != nil {
 			http.Error(w, err.Error(), http.StatusInternalServerError)

@@ -253,7 +253,7 @@ func replaceYAMLTypeError(err error, oldTyp, newTyp reflect.Type) error {
 		oldStr := oldTyp.String()
 		newStr := newTyp.String()
 		for i, s := range e.Errors {
-			e.Errors[i] = strings.Replace(s, oldStr, newStr, -1)
+			e.Errors[i] = strings.ReplaceAll(s, oldStr, newStr)
 		}
 	}
 	return err
@@ -184,11 +184,11 @@ func (c *Client) buildCommand(q *prompb.Query) (string, error) {
 }

 func escapeSingleQuotes(str string) string {
-	return strings.Replace(str, `'`, `\'`, -1)
+	return strings.ReplaceAll(str, `'`, `\'`)
 }

 func escapeSlashes(str string) string {
-	return strings.Replace(str, `/`, `\/`, -1)
+	return strings.ReplaceAll(str, `/`, `\/`)
 }

 func mergeResult(labelsToSeries map[string]*prompb.TimeSeries, results []influx.Result) error {

@@ -512,7 +512,7 @@ func BenchmarkGzip(b *testing.B) {
 			k := b.N / promtestdataSampleCount

 			b.ReportAllocs()
-			b.SetBytes(int64(n) / promtestdataSampleCount)
+			b.SetBytes(n / promtestdataSampleCount)
 			b.ResetTimer()

 			total := 0

@@ -194,9 +194,9 @@ func rangeQueryCases() []benchCase {
 		if !strings.Contains(c.expr, "X") {
 			tmp = append(tmp, c)
 		} else {
-			tmp = append(tmp, benchCase{expr: strings.Replace(c.expr, "X", "one", -1), steps: c.steps})
-			tmp = append(tmp, benchCase{expr: strings.Replace(c.expr, "X", "ten", -1), steps: c.steps})
-			tmp = append(tmp, benchCase{expr: strings.Replace(c.expr, "X", "hundred", -1), steps: c.steps})
+			tmp = append(tmp, benchCase{expr: strings.ReplaceAll(c.expr, "X", "one"), steps: c.steps})
+			tmp = append(tmp, benchCase{expr: strings.ReplaceAll(c.expr, "X", "ten"), steps: c.steps})
+			tmp = append(tmp, benchCase{expr: strings.ReplaceAll(c.expr, "X", "hundred"), steps: c.steps})
 		}
 	}
 	cases = tmp
@@ -783,7 +783,6 @@ func (ng *Engine) findMinMaxTime(s *parser.EvalStmt) (int64, int64) {
 				maxTimestamp = end
 			}
 			evalRange = 0

 		case *parser.MatrixSelector:
 			evalRange = n.Range
 		}

@@ -816,20 +815,20 @@ func (ng *Engine) getTimeRangesForSelector(s *parser.EvalStmt, n *parser.VectorS
 		} else {
 			offsetMilliseconds := durationMilliseconds(subqOffset)
 			start = start - offsetMilliseconds - durationMilliseconds(subqRange)
-			end = end - offsetMilliseconds
+			end -= offsetMilliseconds
 		}

 	if evalRange == 0 {
-		start = start - durationMilliseconds(s.LookbackDelta)
+		start -= durationMilliseconds(s.LookbackDelta)
 	} else {
 		// For all matrix queries we want to ensure that we have (end-start) + range selected
 		// this way we have `range` data before the start time
-		start = start - durationMilliseconds(evalRange)
+		start -= durationMilliseconds(evalRange)
 	}

 	offsetMilliseconds := durationMilliseconds(n.OriginalOffset)
-	start = start - offsetMilliseconds
-	end = end - offsetMilliseconds
+	start -= offsetMilliseconds
+	end -= offsetMilliseconds

 	return start, end
 }
@@ -1745,7 +1744,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) {
 		res, ws := newEv.eval(e.Expr)
 		ev.currentSamples = newEv.currentSamples
 		ev.samplesStats.UpdatePeakFromSubquery(newEv.samplesStats)
-		for ts, step := ev.startTimestamp, -1; ts <= ev.endTimestamp; ts = ts + ev.interval {
+		for ts, step := ev.startTimestamp, -1; ts <= ev.endTimestamp; ts += ev.interval {
 			step++
 			ev.samplesStats.IncrementSamplesAtStep(step, newEv.samplesStats.TotalSamples)
 		}

@@ -1767,7 +1766,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) {
 		if len(mat[i].Floats)+len(mat[i].Histograms) != 1 {
 			panic(fmt.Errorf("unexpected number of samples"))
 		}
-		for ts := ev.startTimestamp + ev.interval; ts <= ev.endTimestamp; ts = ts + ev.interval {
+		for ts := ev.startTimestamp + ev.interval; ts <= ev.endTimestamp; ts += ev.interval {
 			if len(mat[i].Floats) > 0 {
 				mat[i].Floats = append(mat[i].Floats, FPoint{
 					T: ts,

@@ -2695,7 +2694,7 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without
 			aggr.floatValue = float64(aggr.groupCount)

 		case parser.STDVAR:
-			aggr.floatValue = aggr.floatValue / float64(aggr.groupCount)
+			aggr.floatValue /= float64(aggr.groupCount)

 		case parser.STDDEV:
 			aggr.floatValue = math.Sqrt(aggr.floatValue / float64(aggr.groupCount))

@@ -3269,7 +3269,7 @@ func TestNativeHistogram_HistogramCountAndSum(t *testing.T) {
 			require.Len(t, vector, 1)
 			require.Nil(t, vector[0].H)
 			if floatHisto {
-				require.Equal(t, float64(h.ToFloat().Count), vector[0].F)
+				require.Equal(t, h.ToFloat().Count, vector[0].F)
 			} else {
 				require.Equal(t, float64(h.Count), vector[0].F)
 			}
@@ -880,10 +880,10 @@ func linearRegression(samples []FPoint, interceptTime int64) (slope, intercept f
 		}
 		return 0, initY
 	}
-	sumX = sumX + cX
-	sumY = sumY + cY
-	sumXY = sumXY + cXY
-	sumX2 = sumX2 + cX2
+	sumX += cX
+	sumY += cY
+	sumXY += cXY
+	sumX2 += cX2

 	covXY := sumXY - sumX*sumY/n
 	varX := sumX2 - sumX*sumX/n

@@ -51,7 +51,7 @@ func TestDeriv(t *testing.T) {
 	// https://github.com/prometheus/prometheus/issues/7180
 	for i = 0; i < 15; i++ {
 		jitter := 12 * i % 2
-		a.Append(0, metric, int64(start+interval*i+jitter), 1)
+		a.Append(0, metric, start+interval*i+jitter, 1)
 	}

 	require.NoError(t, a.Commit())

@@ -349,7 +349,7 @@ func (f inspector) Visit(node Node, path []Node) (Visitor, error) {
 // for all the non-nil children of node, recursively.
 func Inspect(node Node, f inspector) {
 	//nolint: errcheck
-	Walk(inspector(f), node, nil)
+	Walk(f, node, nil)
 }

 // Children returns a list of all child nodes of a syntax tree node.
@@ -3592,7 +3592,7 @@ func TestNaNExpression(t *testing.T) {

 	nl, ok := expr.(*NumberLiteral)
 	require.True(t, ok, "expected number literal but got %T", expr)
-	require.True(t, math.IsNaN(float64(nl.Val)), "expected 'NaN' in number literal but got %v", nl.Val)
+	require.True(t, math.IsNaN(nl.Val), "expected 'NaN' in number literal but got %v", nl.Val)
 }

 var testSeries = []struct {

@@ -641,7 +641,7 @@ func verifyLabelLimits(lset labels.Labels, limits *labelLimits) error {
 	met := lset.Get(labels.MetricName)
 	if limits.labelLimit > 0 {
 		nbLabels := lset.Len()
-		if nbLabels > int(limits.labelLimit) {
+		if nbLabels > limits.labelLimit {
 			return fmt.Errorf("label_limit exceeded (metric: %.50s, number of labels: %d, limit: %d)", met, nbLabels, limits.labelLimit)
 		}
 	}

@@ -653,14 +653,14 @@ func verifyLabelLimits(lset labels.Labels, limits *labelLimits) error {
 	return lset.Validate(func(l labels.Label) error {
 		if limits.labelNameLengthLimit > 0 {
 			nameLength := len(l.Name)
-			if nameLength > int(limits.labelNameLengthLimit) {
+			if nameLength > limits.labelNameLengthLimit {
 				return fmt.Errorf("label_name_length_limit exceeded (metric: %.50s, label name: %.50s, length: %d, limit: %d)", met, l.Name, nameLength, limits.labelNameLengthLimit)
 			}
 		}

 		if limits.labelValueLengthLimit > 0 {
 			valueLength := len(l.Value)
-			if valueLength > int(limits.labelValueLengthLimit) {
+			if valueLength > limits.labelValueLengthLimit {
 				return fmt.Errorf("label_value_length_limit exceeded (metric: %.50s, label name: %.50s, value: %.50q, length: %d, limit: %d)", met, l.Name, l.Value, valueLength, limits.labelValueLengthLimit)
 			}
 		}
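Note: many of the Go changes in this commit come from the new unconvert linter, which reports type conversions whose operand already has the target type. A minimal sketch of the pattern (hypothetical code, not from this repository):

package main

import "fmt"

func main() {
	n := 3
	limit := 5 // already an int

	// unconvert flags int(limit): the conversion is a no-op because
	// limit is already of type int. Dropping it changes nothing.
	if n > int(limit) { // reported by unconvert
		fmt.Println("over limit")
	}
	if n > limit { // equivalent, conversion removed
		fmt.Println("over limit")
	}
}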
@ -322,7 +322,7 @@ func TestScrapePoolReloadPreserveRelabeledIntervalTimeout(t *testing.T) {
|
||||||
ScrapeTimeout: model.Duration(2 * time.Second),
|
ScrapeTimeout: model.Duration(2 * time.Second),
|
||||||
}
|
}
|
||||||
newLoop := func(opts scrapeLoopOptions) loop {
|
newLoop := func(opts scrapeLoopOptions) loop {
|
||||||
l := &testLoop{interval: time.Duration(opts.interval), timeout: time.Duration(opts.timeout)}
|
l := &testLoop{interval: opts.interval, timeout: opts.timeout}
|
||||||
l.startFunc = func(interval, timeout time.Duration, errc chan<- error) {
|
l.startFunc = func(interval, timeout time.Duration, errc chan<- error) {
|
||||||
require.Equal(t, 5*time.Second, interval, "Unexpected scrape interval")
|
require.Equal(t, 5*time.Second, interval, "Unexpected scrape interval")
|
||||||
require.Equal(t, 3*time.Second, timeout, "Unexpected scrape timeout")
|
require.Equal(t, 3*time.Second, timeout, "Unexpected scrape timeout")
|
||||||
|
@ -546,7 +546,7 @@ func TestScrapePoolRaces(t *testing.T) {
|
||||||
require.Equal(t, expectedDropped, len(dropped), "Invalid number of dropped targets")
|
require.Equal(t, expectedDropped, len(dropped), "Invalid number of dropped targets")
|
||||||
|
|
||||||
for i := 0; i < 20; i++ {
|
for i := 0; i < 20; i++ {
|
||||||
time.Sleep(time.Duration(10 * time.Millisecond))
|
time.Sleep(10 * time.Millisecond)
|
||||||
sp.reload(newConfig())
|
sp.reload(newConfig())
|
||||||
}
|
}
|
||||||
sp.stop()
|
sp.stop()
|
||||||
|
@ -1201,7 +1201,6 @@ func TestScrapeLoopRunCreatesStaleMarkersOnParseFailure(t *testing.T) {
|
||||||
// Succeed once, several failures, then stop.
|
// Succeed once, several failures, then stop.
|
||||||
scraper.scrapeFunc = func(ctx context.Context, w io.Writer) error {
|
scraper.scrapeFunc = func(ctx context.Context, w io.Writer) error {
|
||||||
numScrapes++
|
numScrapes++
|
||||||
|
|
||||||
switch numScrapes {
|
switch numScrapes {
|
||||||
case 1:
|
case 1:
|
||||||
w.Write([]byte("metric_a 42\n"))
|
w.Write([]byte("metric_a 42\n"))
|
||||||
|
@ -1286,7 +1285,6 @@ func TestScrapeLoopCache(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
numScrapes++
|
numScrapes++
|
||||||
|
|
||||||
switch numScrapes {
|
switch numScrapes {
|
||||||
case 1:
|
case 1:
|
||||||
w.Write([]byte("metric_a 42\nmetric_b 43\n"))
|
w.Write([]byte("metric_a 42\nmetric_b 43\n"))
|
||||||
|
|
|
@ -413,9 +413,9 @@ func PopulateLabels(lb *labels.Builder, cfg *config.ScrapeConfig, noDefaultPort
|
||||||
// Addresses reaching this point are already wrapped in [] if necessary.
|
// Addresses reaching this point are already wrapped in [] if necessary.
|
||||||
switch scheme {
|
switch scheme {
|
||||||
case "http", "":
|
case "http", "":
|
||||||
addr = addr + ":80"
|
addr += ":80"
|
||||||
case "https":
|
case "https":
|
||||||
addr = addr + ":443"
|
addr += ":443"
|
||||||
default:
|
default:
|
||||||
return labels.EmptyLabels(), labels.EmptyLabels(), errors.Errorf("invalid scheme: %q", cfg.Scheme)
|
return labels.EmptyLabels(), labels.EmptyLabels(), errors.Errorf("invalid scheme: %q", cfg.Scheme)
|
||||||
}
|
}
|
||||||
|
|
|
@ -429,7 +429,6 @@ func (c *concreteSeriesIterator) Seek(t int64) chunkenc.ValueType {
|
||||||
c.histogramsCur += sort.Search(len(c.series.histograms)-c.histogramsCur, func(n int) bool {
|
c.histogramsCur += sort.Search(len(c.series.histograms)-c.histogramsCur, func(n int) bool {
|
||||||
return c.series.histograms[n+c.histogramsCur].Timestamp >= t
|
return c.series.histograms[n+c.histogramsCur].Timestamp >= t
|
||||||
})
|
})
|
||||||
|
|
||||||
switch {
|
switch {
|
||||||
case c.floatsCur < len(c.series.floats) && c.histogramsCur < len(c.series.histograms):
|
case c.floatsCur < len(c.series.floats) && c.histogramsCur < len(c.series.histograms):
|
||||||
// If float samples and histogram samples have overlapping timestamps prefer the float samples.
|
// If float samples and histogram samples have overlapping timestamps prefer the float samples.
|
||||||
|
@ -452,7 +451,6 @@ func (c *concreteSeriesIterator) Seek(t int64) chunkenc.ValueType {
|
||||||
case c.histogramsCur < len(c.series.histograms):
|
case c.histogramsCur < len(c.series.histograms):
|
||||||
c.curValType = getHistogramValType(&c.series.histograms[c.histogramsCur])
|
c.curValType = getHistogramValType(&c.series.histograms[c.histogramsCur])
|
||||||
}
|
}
|
||||||
|
|
||||||
return c.curValType
|
return c.curValType
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -516,7 +514,6 @@ func (c *concreteSeriesIterator) Next() chunkenc.ValueType {
|
||||||
peekHistTS = c.series.histograms[c.histogramsCur+1].Timestamp
|
peekHistTS = c.series.histograms[c.histogramsCur+1].Timestamp
|
||||||
}
|
}
|
||||||
c.curValType = chunkenc.ValNone
|
c.curValType = chunkenc.ValNone
|
||||||
|
|
||||||
switch {
|
switch {
|
||||||
case peekFloatTS < peekHistTS:
|
case peekFloatTS < peekHistTS:
|
||||||
c.floatsCur++
|
c.floatsCur++
|
||||||
|
@ -536,7 +533,6 @@ func (c *concreteSeriesIterator) Next() chunkenc.ValueType {
|
||||||
c.histogramsCur++
|
c.histogramsCur++
|
||||||
c.curValType = chunkenc.ValFloat
|
c.curValType = chunkenc.ValFloat
|
||||||
}
|
}
|
||||||
|
|
||||||
return c.curValType
|
return c.curValType
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@@ -609,7 +609,7 @@ outer:

 		t.metrics.enqueueRetriesTotal.Inc()
 		time.Sleep(time.Duration(backoff))
-		backoff = backoff * 2
+		backoff *= 2
 		// It is reasonable to use t.cfg.MaxBackoff here, as if we have hit
 		// the full backoff we are likely waiting for external resources.
 		if backoff > t.cfg.MaxBackoff {

@@ -660,7 +660,7 @@ outer:

 		t.metrics.enqueueRetriesTotal.Inc()
 		time.Sleep(time.Duration(backoff))
-		backoff = backoff * 2
+		backoff *= 2
 		if backoff > t.cfg.MaxBackoff {
 			backoff = t.cfg.MaxBackoff
 		}

@@ -707,7 +707,7 @@ outer:

 		t.metrics.enqueueRetriesTotal.Inc()
 		time.Sleep(time.Duration(backoff))
-		backoff = backoff * 2
+		backoff *= 2
 		if backoff > t.cfg.MaxBackoff {
 			backoff = t.cfg.MaxBackoff
 		}

@@ -754,7 +754,7 @@ outer:

 		t.metrics.enqueueRetriesTotal.Inc()
 		time.Sleep(time.Duration(backoff))
-		backoff = backoff * 2
+		backoff *= 2
 		if backoff > t.cfg.MaxBackoff {
 			backoff = t.cfg.MaxBackoff
 		}
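Note: the repeated `backoff = backoff * 2` rewrites come from gocritic's assignOp check, which prefers compound assignment operators when the left-hand side repeats on the right. A tiny hypothetical sketch:

package main

import "fmt"

func main() {
	backoff := 1
	for i := 0; i < 4; i++ {
		backoff *= 2 // assignOp prefers this over backoff = backoff * 2
	}
	fmt.Println(backoff) // 16
}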
@ -421,7 +421,7 @@ func (te Expander) ExpandHTML(templateFiles []string) (result string, resultErr
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
|
//nolint:unconvert // Before Go 1.19 conversion from text_template to html_template is mandatory
|
||||||
tmpl := html_template.New(te.name).Funcs(html_template.FuncMap(te.funcMap))
|
tmpl := html_template.New(te.name).Funcs(html_template.FuncMap(te.funcMap))
|
||||||
tmpl.Option(te.options...)
|
tmpl.Option(te.options...)
|
||||||
tmpl.Funcs(html_template.FuncMap{
|
tmpl.Funcs(html_template.FuncMap{
|
||||||
|
|
|
@ -739,8 +739,7 @@ func TestStorage_DuplicateExemplarsIgnored(t *testing.T) {
|
||||||
var dec record.Decoder
|
var dec record.Decoder
|
||||||
for r.Next() {
|
for r.Next() {
|
||||||
rec := r.Record()
|
rec := r.Record()
|
||||||
switch dec.Type(rec) {
|
if dec.Type(rec) == record.Exemplars {
|
||||||
case record.Exemplars:
|
|
||||||
var exemplars []record.RefExemplar
|
var exemplars []record.RefExemplar
|
||||||
exemplars, err = dec.Exemplars(rec, exemplars)
|
exemplars, err = dec.Exemplars(rec, exemplars)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
|
@ -630,7 +630,7 @@ func genHistogramSeries(totalSeries, labelCount int, mint, maxt, step int64, flo
|
||||||
{Offset: 0, Length: 2},
|
{Offset: 0, Length: 2},
|
||||||
{Offset: 1, Length: 2},
|
{Offset: 1, Length: 2},
|
||||||
},
|
},
|
||||||
PositiveBuckets: []int64{int64(ts + 1), 1, -1, 0},
|
PositiveBuckets: []int64{ts + 1, 1, -1, 0},
|
||||||
}
|
}
|
||||||
if ts != mint {
|
if ts != mint {
|
||||||
// By setting the counter reset hint to "no counter
|
// By setting the counter reset hint to "no counter
|
||||||
|
@ -669,7 +669,7 @@ func genHistogramAndFloatSeries(totalSeries, labelCount int, mint, maxt, step in
|
||||||
{Offset: 0, Length: 2},
|
{Offset: 0, Length: 2},
|
||||||
{Offset: 1, Length: 2},
|
{Offset: 1, Length: 2},
|
||||||
},
|
},
|
||||||
PositiveBuckets: []int64{int64(ts + 1), 1, -1, 0},
|
PositiveBuckets: []int64{ts + 1, 1, -1, 0},
|
||||||
}
|
}
|
||||||
if count > 1 && count%5 != 1 {
|
if count > 1 && count%5 != 1 {
|
||||||
// Same rationale for this as above in
|
// Same rationale for this as above in
|
||||||
|
|
|
@ -182,7 +182,7 @@ func (b *bstreamReader) readBits(nbits uint8) (uint64, error) {
|
||||||
}
|
}
|
||||||
|
|
||||||
bitmask = (uint64(1) << nbits) - 1
|
bitmask = (uint64(1) << nbits) - 1
|
||||||
v = v | ((b.buffer >> (b.valid - nbits)) & bitmask)
|
v |= ((b.buffer >> (b.valid - nbits)) & bitmask)
|
||||||
b.valid -= nbits
|
b.valid -= nbits
|
||||||
|
|
||||||
return v, nil
|
return v, nil
|
||||||
|
@ -242,13 +242,13 @@ func (b *bstreamReader) loadNextBuffer(nbits uint8) bool {
|
||||||
if b.streamOffset+nbytes == len(b.stream) {
|
if b.streamOffset+nbytes == len(b.stream) {
|
||||||
// There can be concurrent writes happening on the very last byte
|
// There can be concurrent writes happening on the very last byte
|
||||||
// of the stream, so use the copy we took at initialization time.
|
// of the stream, so use the copy we took at initialization time.
|
||||||
buffer = buffer | uint64(b.last)
|
buffer |= uint64(b.last)
|
||||||
// Read up to the byte before
|
// Read up to the byte before
|
||||||
skip = 1
|
skip = 1
|
||||||
}
|
}
|
||||||
|
|
||||||
for i := 0; i < nbytes-skip; i++ {
|
for i := 0; i < nbytes-skip; i++ {
|
||||||
buffer = buffer | (uint64(b.stream[b.streamOffset+i]) << uint(8*(nbytes-i-1)))
|
buffer |= (uint64(b.stream[b.streamOffset+i]) << uint(8*(nbytes-i-1)))
|
||||||
}
|
}
|
||||||
|
|
||||||
b.buffer = buffer
|
b.buffer = buffer
|
||||||
|
|
|
@@ -785,7 +785,7 @@ func (it *floatHistogramIterator) Next() ValueType {
 		it.err = err
 		return ValNone
 	}
-	it.tDelta = it.tDelta + tDod
+	it.tDelta += tDod
 	it.t += it.tDelta

 	if ok := it.readXor(&it.cnt.value, &it.cnt.leading, &it.cnt.trailing); !ok {

@@ -875,7 +875,7 @@ func (it *histogramIterator) Next() ValueType {
 		it.err = err
 		return ValNone
 	}
-	it.tDelta = it.tDelta + tDod
+	it.tDelta += tDod
 	it.t += it.tDelta

 	cntDod, err := readVarbitInt(&it.br)

@@ -883,7 +883,7 @@ func (it *histogramIterator) Next() ValueType {
 		it.err = err
 		return ValNone
 	}
-	it.cntDelta = it.cntDelta + cntDod
+	it.cntDelta += cntDod
 	it.cnt = uint64(int64(it.cnt) + it.cntDelta)

 	zcntDod, err := readVarbitInt(&it.br)

@@ -891,7 +891,7 @@ func (it *histogramIterator) Next() ValueType {
 		it.err = err
 		return ValNone
 	}
-	it.zCntDelta = it.zCntDelta + zcntDod
+	it.zCntDelta += zcntDod
 	it.zCnt = uint64(int64(it.zCnt) + it.zCntDelta)

 	ok := it.readSum()

@@ -122,7 +122,7 @@ func readVarbitInt(b *bstreamReader) (int64, error) {
 		}
 		if bits > (1 << (sz - 1)) {
 			// Or something.
-			bits = bits - (1 << sz)
+			bits -= (1 << sz)
 		}
 		val = int64(bits)
 	}
@@ -163,7 +163,6 @@ func (a *xorAppender) AppendFloatHistogram(int64, *histogram.FloatHistogram) {
 func (a *xorAppender) Append(t int64, v float64) {
 	var tDelta uint64
 	num := binary.BigEndian.Uint16(a.b.bytes())

 	switch num {
 	case 0:
 		buf := make([]byte, binary.MaxVarintLen64)

@@ -171,7 +170,6 @@ func (a *xorAppender) Append(t int64, v float64) {
 			a.b.writeByte(b)
 		}
 		a.b.writeBits(math.Float64bits(v), 64)

 	case 1:
 		tDelta = uint64(t - a.t)

@@ -322,7 +320,7 @@ func (it *xorIterator) Next() ValueType {
 		return ValNone
 	}
 	it.tDelta = tDelta
-	it.t = it.t + int64(it.tDelta)
+	it.t += int64(it.tDelta)

 	return it.readValue()
 }

@@ -385,7 +383,7 @@ func (it *xorIterator) Next() ValueType {
 	}

 	it.tDelta = uint64(int64(it.tDelta) + dod)
-	it.t = it.t + int64(it.tDelta)
+	it.t += int64(it.tDelta)

 	return it.readValue()
 }
@@ -44,7 +44,7 @@ func ExponentialBlockRanges(minSize int64, steps, stepSize int) []int64 {
 	curRange := minSize
 	for i := 0; i < steps; i++ {
 		ranges = append(ranges, curRange)
-		curRange = curRange * int64(stepSize)
+		curRange *= int64(stepSize)
 	}

 	return ranges

@@ -1452,12 +1452,6 @@ func TestSparseHistogramSpaceSavings(t *testing.T) {
 		{100, 15, 3, 5},
 		{100, 50, 3, 3},
 		{100, 100, 3, 2},
-		//{1000, 15, 1, 0},
-		//{1000, 50, 1, 0},
-		//{1000, 100, 1, 0},
-		//{1000, 15, 3, 5},
-		//{1000, 50, 3, 3},
-		//{1000, 100, 3, 2},
 	}

 	type testSummary struct {
@@ -260,7 +260,7 @@ func newDBMetrics(db *DB, r prometheus.Registerer) *dbMetrics {
 		Help: "Size of symbol table in memory for loaded blocks",
 	}, func() float64 {
 		db.mtx.RLock()
-		blocks := db.blocks[:]
+		blocks := db.blocks
 		db.mtx.RUnlock()
 		symTblSize := uint64(0)
 		for _, b := range blocks {

@@ -1187,7 +1187,7 @@ func (db *DB) compactOOO(dest string, oooHead *OOOCompactionHead) (_ []ulid.ULID
 		}
 	}()

-	for t := blockSize * (oooHeadMint / blockSize); t <= oooHeadMaxt; t = t + blockSize {
+	for t := blockSize * (oooHeadMint / blockSize); t <= oooHeadMaxt; t += blockSize {
 		mint, maxt := t, t+blockSize
 		// Block intervals are half-open: [b.MinTime, b.MaxTime). Block intervals are always +1 than the total samples it includes.
 		uid, err := db.compactor.Write(dest, oooHead.CloneForTimeRange(mint, maxt-1), mint, maxt, nil)

@@ -1509,7 +1509,7 @@ func BeyondSizeRetention(db *DB, blocks []*Block) (deletable map[ulid.ULID]struc
 	blocksSize := db.Head().Size()
 	for i, block := range blocks {
 		blocksSize += block.Size()
-		if blocksSize > int64(db.opts.MaxBytes) {
+		if blocksSize > db.opts.MaxBytes {
 			// Add this and all following blocks for deletion.
 			for _, b := range blocks[i:] {
 				deletable[b.meta.ULID] = struct{}{}

@@ -1076,7 +1076,7 @@ func TestWALSegmentSizeOptions(t *testing.T) {

 			dbDir := db.Dir()
 			require.NoError(t, db.Close())
-			testFunc(dbDir, int(opts.WALSegmentSize))
+			testFunc(dbDir, opts.WALSegmentSize)
 		})
 	}
 }
@@ -2996,7 +2996,7 @@ func TestCompactHead(t *testing.T) {
 		series = seriesSet.At().Iterator(series)
 		for series.Next() == chunkenc.ValFloat {
 			time, val := series.At()
-			actSamples = append(actSamples, sample{int64(time), val, nil, nil})
+			actSamples = append(actSamples, sample{time, val, nil, nil})
 		}
 		require.NoError(t, series.Err())
 	}

@@ -151,7 +151,7 @@ func (ce *CircularExemplarStorage) Querier(_ context.Context) (storage.ExemplarQ
 func (ce *CircularExemplarStorage) Select(start, end int64, matchers ...[]*labels.Matcher) ([]exemplar.QueryResult, error) {
 	ret := make([]exemplar.QueryResult, 0)

-	if len(ce.exemplars) <= 0 {
+	if len(ce.exemplars) == 0 {
 		return ret, nil
 	}

@@ -219,7 +219,7 @@ func (ce *CircularExemplarStorage) ValidateExemplar(l labels.Labels, e exemplar.
 // Not thread safe. The append parameters tells us whether this is an external validation, or internal
 // as a result of an AddExemplar call, in which case we should update any relevant metrics.
 func (ce *CircularExemplarStorage) validateExemplar(key []byte, e exemplar.Exemplar, append bool) error {
-	if len(ce.exemplars) <= 0 {
+	if len(ce.exemplars) == 0 {
 		return storage.ErrExemplarsDisabled
 	}

@@ -334,7 +334,7 @@ func (ce *CircularExemplarStorage) migrate(entry *circularBufferEntry) {
 }

 func (ce *CircularExemplarStorage) AddExemplar(l labels.Labels, e exemplar.Exemplar) error {
-	if len(ce.exemplars) <= 0 {
+	if len(ce.exemplars) == 0 {
 		return storage.ErrExemplarsDisabled
 	}
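Note: the `len(ce.exemplars) <= 0` rewrites match gocritic's sloppyLen check: len never returns a negative value, so `<= 0` can only mean `== 0`. Hypothetical sketch:

package main

import "fmt"

func main() {
	var xs []int
	// len never returns a negative number, so `len(xs) <= 0` is just a
	// misleading spelling of `len(xs) == 0` (gocritic: sloppyLen).
	if len(xs) == 0 {
		fmt.Println("empty")
	}
}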
@@ -1453,7 +1453,7 @@ func (h *Head) Delete(mint, maxt int64, ms ...*labels.Matcher) error {
 		}
 	}
 	for _, s := range stones {
-		h.tombstones.AddInterval(storage.SeriesRef(s.Ref), s.Intervals[0])
+		h.tombstones.AddInterval(s.Ref, s.Intervals[0])
 	}

 	return nil
@@ -3007,7 +3007,7 @@ func TestHistogramInWALAndMmapChunk(t *testing.T) {
 			hists = tsdbutil.GenerateTestHistograms(numHistograms)
 		}
 		for _, h := range hists {
-			h.Count = h.Count * 2
+			h.Count *= 2
 			h.NegativeSpans = h.PositiveSpans
 			h.NegativeBuckets = h.PositiveBuckets
 			_, err := app.AppendHistogram(0, s1, ts, h, nil)

@@ -3030,7 +3030,7 @@ func TestHistogramInWALAndMmapChunk(t *testing.T) {
 			hists = tsdbutil.GenerateTestFloatHistograms(numHistograms)
 		}
 		for _, h := range hists {
-			h.Count = h.Count * 2
+			h.Count *= 2
 			h.NegativeSpans = h.PositiveSpans
 			h.NegativeBuckets = h.PositiveBuckets
 			_, err := app.AppendHistogram(0, s1, ts, nil, h)

@@ -3071,26 +3071,26 @@ func TestHistogramInWALAndMmapChunk(t *testing.T) {
 		}
 		for _, h := range hists {
 			ts++
-			h.Count = h.Count * 2
+			h.Count *= 2
 			h.NegativeSpans = h.PositiveSpans
 			h.NegativeBuckets = h.PositiveBuckets
-			_, err := app.AppendHistogram(0, s2, int64(ts), h, nil)
+			_, err := app.AppendHistogram(0, s2, ts, h, nil)
 			require.NoError(t, err)
 			eh := h.Copy()
 			if !gauge && ts > 30 && (ts-10)%20 == 1 {
 				// Need "unknown" hint after float sample.
 				eh.CounterResetHint = histogram.UnknownCounterReset
 			}
-			exp[k2] = append(exp[k2], sample{t: int64(ts), h: eh})
+			exp[k2] = append(exp[k2], sample{t: ts, h: eh})
 			if ts%20 == 0 {
 				require.NoError(t, app.Commit())
 				app = head.Appender(context.Background())
 				// Add some float.
 				for i := 0; i < 10; i++ {
 					ts++
-					_, err := app.Append(0, s2, int64(ts), float64(ts))
+					_, err := app.Append(0, s2, ts, float64(ts))
 					require.NoError(t, err)
-					exp[k2] = append(exp[k2], sample{t: int64(ts), f: float64(ts)})
+					exp[k2] = append(exp[k2], sample{t: ts, f: float64(ts)})
 				}
 				require.NoError(t, app.Commit())
 				app = head.Appender(context.Background())

@@ -3108,26 +3108,26 @@ func TestHistogramInWALAndMmapChunk(t *testing.T) {
 		}
 		for _, h := range hists {
 			ts++
-			h.Count = h.Count * 2
+			h.Count *= 2
 			h.NegativeSpans = h.PositiveSpans
 			h.NegativeBuckets = h.PositiveBuckets
-			_, err := app.AppendHistogram(0, s2, int64(ts), nil, h)
+			_, err := app.AppendHistogram(0, s2, ts, nil, h)
 			require.NoError(t, err)
 			eh := h.Copy()
 			if !gauge && ts > 30 && (ts-10)%20 == 1 {
 				// Need "unknown" hint after float sample.
 				eh.CounterResetHint = histogram.UnknownCounterReset
 			}
-			exp[k2] = append(exp[k2], sample{t: int64(ts), fh: eh})
+			exp[k2] = append(exp[k2], sample{t: ts, fh: eh})
 			if ts%20 == 0 {
 				require.NoError(t, app.Commit())
 				app = head.Appender(context.Background())
 				// Add some float.
 				for i := 0; i < 10; i++ {
 					ts++
-					_, err := app.Append(0, s2, int64(ts), float64(ts))
+					_, err := app.Append(0, s2, ts, float64(ts))
 					require.NoError(t, err)
-					exp[k2] = append(exp[k2], sample{t: int64(ts), f: float64(ts)})
+					exp[k2] = append(exp[k2], sample{t: ts, f: float64(ts)})
 				}
 				require.NoError(t, app.Commit())
 				app = head.Appender(context.Background())
@@ -4497,7 +4497,6 @@ func TestHistogramValidation(t *testing.T) {
 		default:
 			require.NoError(t, err)
 		}

 		switch err := ValidateFloatHistogram(tc.h.ToFloat()); {
 		case tc.errMsgFloat != "":
 			require.ErrorContains(t, err, tc.errMsgFloat)

@@ -300,7 +300,7 @@ Outer:
 					unknownRefs.Inc()
 					continue
 				}
-				h.tombstones.AddInterval(storage.SeriesRef(s.Ref), itv)
+				h.tombstones.AddInterval(s.Ref, itv)
 			}
 		}
 		tstonesPool.Put(v)

@@ -383,7 +383,7 @@ Outer:
 			floatHistogramsPool.Put(v)
 		case []record.RefMetadata:
 			for _, m := range v {
-				s := h.series.getByID(chunks.HeadSeriesRef(m.Ref))
+				s := h.series.getByID(m.Ref)
 				if s == nil {
 					unknownMetadataRefs.Inc()
 					continue
@@ -536,7 +536,7 @@ func (w *Writer) finishSymbols() error {
 	// Write out the length and symbol count.
 	w.buf1.Reset()
 	w.buf1.PutBE32int(int(symbolTableSize))
-	w.buf1.PutBE32int(int(w.numSymbols))
+	w.buf1.PutBE32int(w.numSymbols)
 	if err := w.writeAt(w.buf1.Get(), w.toc.Symbols); err != nil {
 		return err
 	}

@@ -702,7 +702,6 @@ func (rp *removedPostings) Next() bool {
 			rp.fok = rp.full.Next()
 			return true
 		}

 		switch fcur, rcur := rp.full.At(), rp.remove.At(); {
 		case fcur < rcur:
 			rp.cur = fcur

@@ -31,10 +31,10 @@ type maxHeap struct {
 	Items []Stat
 }

-func (m *maxHeap) init(len int) {
-	m.maxLength = len
+func (m *maxHeap) init(length int) {
+	m.maxLength = length
 	m.minValue = math.MaxUint64
-	m.Items = make([]Stat, 0, len)
+	m.Items = make([]Stat, 0, length)
 }

 func (m *maxHeap) push(item Stat) {
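Note: renaming the init parameter from len to length stops it from shadowing Go's predeclared len function, a pattern some linters (for example gocritic's builtinShadow check, where enabled) report. A hypothetical sketch of why the shadowing is risky:

package example

// With a parameter named len, the builtin len() would be shadowed inside
// this body, and a call like len(items) would no longer compile there.
func reserve(length int) []int {
	items := make([]int, 0, length)
	_ = cap(items) // fine: cap is not shadowed
	return items
}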
@@ -254,7 +254,7 @@ func (txr *txRing) add(appendID uint64) {
 	if txr.txIDCount == len(txr.txIDs) {
 		// Ring buffer is full, expand by doubling.
 		newRing := make([]uint64, txr.txIDCount*2)
-		idx := copy(newRing[:], txr.txIDs[txr.txIDFirst:])
+		idx := copy(newRing, txr.txIDs[txr.txIDFirst:])
 		copy(newRing[idx:], txr.txIDs[:txr.txIDFirst])
 		txr.txIDs = newRing
 		txr.txIDFirst = 0

@@ -967,7 +967,6 @@ func (m *mergedStringIter) Next() bool {
 	if (!m.aok && !m.bok) || (m.Err() != nil) {
 		return false
 	}

 	switch {
 	case !m.aok:
 		m.cur = m.b.At()

@@ -114,7 +114,7 @@ func createIdxChkReaders(t *testing.T, tc []seriesSamples) (IndexReader, ChunkRe

 	var chunkRef chunks.ChunkRef
 	for i, s := range tc {
-		i = i + 1 // 0 is not a valid posting.
+		i++ // 0 is not a valid posting.
 		metas := make([]chunks.Meta, 0, len(s.chunks))
 		for _, chk := range s.chunks {
 			if chk[0].t < blockMint {
@@ -2013,7 +2013,7 @@ func BenchmarkQueries(b *testing.B) {
 			for x := 0; x <= 10; x++ {
 				block, err := OpenBlock(nil, createBlock(b, dir, series), nil)
 				require.NoError(b, err)
-				q, err := NewBlockQuerier(block, 1, int64(nSamples))
+				q, err := NewBlockQuerier(block, 1, nSamples)
 				require.NoError(b, err)
 				qs = append(qs, q)
 			}

@@ -91,7 +91,7 @@ func newWalMetrics(r prometheus.Registerer) *walMetrics {
 // WAL is a write ahead log that can log new series labels and samples.
 // It must be completely read before new entries are logged.
 //
-// DEPRECATED: use wlog pkg combined with the record codex instead.
+// Deprecated: use wlog pkg combined with the record codex instead.
 type WAL interface {
 	Reader() WALReader
 	LogSeries([]record.RefSeries) error

@@ -148,7 +148,7 @@ func newCRC32() hash.Hash32 {

 // SegmentWAL is a write ahead log for series data.
 //
-// DEPRECATED: use wlog pkg combined with the record coders instead.
+// Deprecated: use wlog pkg combined with the record coders instead.
 type SegmentWAL struct {
 	mtx     sync.Mutex
 	metrics *walMetrics
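Note: the DEPRECATED → Deprecated change matters beyond capitalization style: Go tooling (go doc, pkg.go.dev, staticcheck) only recognizes a deprecation notice when the doc-comment paragraph begins exactly with "Deprecated:". Sketch of the convention (hypothetical names):

package example

// NewThing is the supported replacement.
func NewThing() {}

// Deprecated: Use NewThing instead. The "Deprecated:" prefix is the
// convention recognized by go doc, pkg.go.dev, and staticcheck.
func OldThing() { NewThing() }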
@ -428,10 +428,10 @@ func TestLogPartialWrite(t *testing.T) {
|
||||||
faultyRecord: pageSize / (recordHeaderSize + len(record)),
|
faultyRecord: pageSize / (recordHeaderSize + len(record)),
|
||||||
},
|
},
|
||||||
// TODO the current implementation suffers this:
|
// TODO the current implementation suffers this:
|
||||||
//"partial write when logging a record overlapping two pages": {
|
// "partial write when logging a record overlapping two pages": {
|
||||||
// numRecords: (pageSize / (recordHeaderSize + len(record))) + 10,
|
// numRecords: (pageSize / (recordHeaderSize + len(record))) + 10,
|
||||||
// faultyRecord: pageSize/(recordHeaderSize+len(record)) + 1,
|
// faultyRecord: pageSize/(recordHeaderSize+len(record)) + 1,
|
||||||
//},
|
// },
|
||||||
}
|
}
|
||||||
|
|
||||||
for testName, testData := range tests {
|
for testName, testData := range tests {
|
||||||
|
|
|
@ -39,7 +39,7 @@ func getLimits(resource int, unit string) string {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
panic("syscall.Getrlimit failed: " + err.Error())
|
panic("syscall.Getrlimit failed: " + err.Error())
|
||||||
}
|
}
|
||||||
return fmt.Sprintf("(soft=%s, hard=%s)", limitToString(uint64(rlimit.Cur), unit), limitToString(uint64(rlimit.Max), unit))
|
return fmt.Sprintf("(soft=%s, hard=%s)", limitToString(rlimit.Cur, unit), limitToString(rlimit.Max, unit))
|
||||||
}
|
}
|
||||||
|
|
||||||
// FdLimits returns the soft and hard limits for file descriptors.
|
// FdLimits returns the soft and hard limits for file descriptors.
|
||||||
|
|
|
@ -72,11 +72,13 @@ func Statfs(path string) string {
|
||||||
|
|
||||||
var fs syscall.Statfs_t
|
var fs syscall.Statfs_t
|
||||||
err := syscall.Statfs(path, &fs)
|
err := syscall.Statfs(path, &fs)
|
||||||
|
//nolint:unconvert // This ensure Type format on all Platforms
|
||||||
|
localType := int64(fs.Type)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return strconv.FormatInt(int64(fs.Type), 16)
|
return strconv.FormatInt(localType, 16)
|
||||||
}
|
}
|
||||||
if fsType, ok := fsTypes[int64(fs.Type)]; ok {
|
if fsType, ok := fsTypes[localType]; ok {
|
||||||
return fsType
|
return fsType
|
||||||
}
|
}
|
||||||
return strconv.FormatInt(int64(fs.Type), 16)
|
return strconv.FormatInt(localType, 16)
|
||||||
}
|
}
|
||||||
|
|
|
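Note: here the conversion looks redundant on some platforms but is required on others (the width of fs.Type varies by OS), so the commit keeps it and suppresses the linter inline. The general shape of a golangci-lint suppression comment, shown on hypothetical code:

package main

import "fmt"

func main() {
	var size int32 = 42 // pretend this type varies by platform

	// The directive is attached to the offending line, names the specific
	// linter being silenced, and by convention explains why.
	//nolint:unconvert // width of size differs across platforms
	total := int64(size)

	fmt.Println(total)
}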
@@ -243,7 +243,7 @@ func NewAPI(
 	remoteReadConcurrencyLimit int,
 	remoteReadMaxBytesInFrame int,
 	isAgent bool,
-	CORSOrigin *regexp.Regexp,
+	corsOrigin *regexp.Regexp,
 	runtimeInfo func() (RuntimeInfo, error),
 	buildInfo *PrometheusVersion,
 	gatherer prometheus.Gatherer,

@@ -269,7 +269,7 @@ func NewAPI(
 		enableAdmin:    enableAdmin,
 		rulesRetriever: rr,
 		logger:         logger,
-		CORSOrigin:     CORSOrigin,
+		CORSOrigin:     corsOrigin,
 		runtimeInfo:    runtimeInfo,
 		buildInfo:      buildInfo,
 		gatherer:       gatherer,

@@ -2850,7 +2850,7 @@ func TestRespondSuccess(t *testing.T) {
 	}

 	var res response
-	if err = json.Unmarshal([]byte(body), &res); err != nil {
+	if err = json.Unmarshal(body, &res); err != nil {
 		t.Fatalf("Error unmarshaling JSON body: %s", err)
 	}

@@ -2886,7 +2886,7 @@ func TestRespondError(t *testing.T) {
 	}

 	var res response
-	if err = json.Unmarshal([]byte(body), &res); err != nil {
+	if err = json.Unmarshal(body, &res); err != nil {
 		t.Fatalf("Error unmarshaling JSON body: %s", err)
 	}

@@ -719,9 +719,9 @@ func (h *Handler) runtimeInfo() (api_v1.RuntimeInfo, error) {
 	}
 	if h.options.TSDBMaxBytes != 0 {
 		if status.StorageRetention != "" {
-			status.StorageRetention = status.StorageRetention + " or "
+			status.StorageRetention += " or "
 		}
-		status.StorageRetention = status.StorageRetention + h.options.TSDBMaxBytes.String()
+		status.StorageRetention += h.options.TSDBMaxBytes.String()
 	}

 	metrics, err := h.gatherer.Gather()