Merge pull request #12241 from mmorel-35/linter/nilerr

enable gocritic, unconvert and unused linters
Julien Pivotto 2023-04-20 15:13:31 +02:00 committed by GitHub
commit e2512078e5
56 changed files with 120 additions and 127 deletions

@ -149,6 +149,8 @@ jobs:
- name: Lint
uses: golangci/golangci-lint-action@v3.4.0
with:
args: --verbose
skip-cache: true
version: v1.51.2
fuzzing:
uses: ./.github/workflows/fuzzing.yml

@ -1,5 +1,5 @@
run:
deadline: 5m
timeout: 15m
skip-files:
# Skip autogenerated files.
- ^.*\.(pb|y)\.go$
@ -10,14 +10,23 @@ output:
linters:
enable:
- depguard
- gocritic
- gofumpt
- goimports
- revive
- misspell
- unconvert
- unused
issues:
max-same-issues: 0
exclude-rules:
- linters:
- gocritic
text: "appendAssign"
- linters:
- gocritic
text: "singleCaseSwitch"
- path: _test.go
linters:
- errcheck
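The two `gocritic` exclusions above are the checks the maintainers chose not to enforce. A minimal, self-contained sketch (hypothetical names) of what each would otherwise flag:

```go
package main

import "fmt"

func main() {
	// appendAssign: flagged when append's result is assigned to a variable
	// other than the slice passed in; the exclude rule keeps this legal.
	base := []int{1, 2, 3}
	extended := append(base, 4)

	// singleCaseSwitch: flagged because a one-case switch could be an `if`;
	// the exclude rule leaves existing one-case switches alone.
	switch n := len(extended); n {
	case 4:
		fmt.Println("got", n)
	}
}
```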

@ -491,7 +491,7 @@ func main() {
if cfgFile.StorageConfig.ExemplarsConfig == nil {
cfgFile.StorageConfig.ExemplarsConfig = &config.DefaultExemplarsConfig
}
cfg.tsdb.MaxExemplars = int64(cfgFile.StorageConfig.ExemplarsConfig.MaxExemplars)
cfg.tsdb.MaxExemplars = cfgFile.StorageConfig.ExemplarsConfig.MaxExemplars
}
if cfgFile.StorageConfig.TSDBConfig != nil {
cfg.tsdb.OutOfOrderTimeWindow = cfgFile.StorageConfig.TSDBConfig.OutOfOrderTimeWindow
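The change above is typical of the `unconvert` linter: it removes conversions to a type the expression already has. A minimal sketch, assuming a simplified config struct whose `MaxExemplars` field is already `int64`:

```go
package main

import "fmt"

type exemplarsConfig struct{ MaxExemplars int64 }

func main() {
	cfg := exemplarsConfig{MaxExemplars: 100000}
	// unconvert flags the conversion because the field is already int64:
	//   max := int64(cfg.MaxExemplars) // redundant
	max := cfg.MaxExemplars
	fmt.Println(max)
}
```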

@ -193,7 +193,7 @@ func (p *queryLogTest) String() string {
}
name = name + ", " + p.host + ":" + strconv.Itoa(p.port)
if p.enabledAtStart {
name = name + ", enabled at start"
name += ", enabled at start"
}
if p.prefix != "" {
name = name + ", with prefix " + p.prefix
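This is gocritic's `assignOp` check, which rewrites `x = x + y` as `x += y`. It only fires when the statement has exactly that shape, which is presumably why the multi-operand concatenation just above is untouched. A minimal sketch:

```go
package main

import "fmt"

func main() {
	name := "query"
	// Flagged: the statement is exactly `x = x + y`.
	//   name = name + ", enabled at start"
	name += ", enabled at start"

	// Not flagged: the right side parses as (name + a) + b, so the
	// top-level + does not have `name` itself as its first operand.
	name = name + ", with prefix " + "__"
	fmt.Println(name)
}
```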

@ -101,7 +101,7 @@ func createBlocks(input []byte, mint, maxt, maxBlockDuration int64, maxSamplesIn
nextSampleTs int64 = math.MaxInt64
)
for t := mint; t <= maxt; t = t + blockDuration {
for t := mint; t <= maxt; t += blockDuration {
tsUpper := t + blockDuration
if nextSampleTs != math.MaxInt64 && nextSampleTs >= tsUpper {
// The next sample is not in this timerange, we can avoid parsing

@ -100,7 +100,7 @@ func (importer *ruleImporter) importRule(ctx context.Context, ruleExpr, ruleName
startInMs := start.Unix() * int64(time.Second/time.Millisecond)
endInMs := end.Unix() * int64(time.Second/time.Millisecond)
for startOfBlock := blockDuration * (startInMs / blockDuration); startOfBlock <= endInMs; startOfBlock = startOfBlock + blockDuration {
for startOfBlock := blockDuration * (startInMs / blockDuration); startOfBlock <= endInMs; startOfBlock += blockDuration {
endOfBlock := startOfBlock + blockDuration - 1
currStart := max(startOfBlock/int64(time.Second/time.Millisecond), start.Unix())

@ -130,7 +130,7 @@ func resolveAndGlobFilepaths(baseDir string, utf *unitTestFile) error {
if err != nil {
return err
}
if len(m) <= 0 {
if len(m) == 0 {
fmt.Fprintln(os.Stderr, " WARNING: no file match pattern", rf)
}
globbedFiles = append(globbedFiles, m...)
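`len` never returns a negative value, so `len(m) <= 0` can only mean `len(m) == 0`; this looks like gocritic's `sloppyLen` check. A minimal sketch:

```go
package main

import "fmt"

func main() {
	var m []string
	// sloppyLen: `if len(m) <= 0` is flagged; == 0 says the same thing plainly.
	if len(m) == 0 {
		fmt.Println("WARNING: no file match pattern")
	}
}
```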

@ -300,7 +300,7 @@ func (e *EndpointSlice) buildEndpointSlice(eps endpointSliceAdaptor) *targetgrou
}
if port.protocol() != nil {
target[endpointSlicePortProtocolLabel] = lv(string(*port.protocol()))
target[endpointSlicePortProtocolLabel] = lv(*port.protocol())
}
if port.port() != nil {

@ -254,7 +254,7 @@ func replaceYAMLTypeError(err error, oldTyp, newTyp reflect.Type) error {
oldStr := oldTyp.String()
newStr := newTyp.String()
for i, s := range e.Errors {
e.Errors[i] = strings.Replace(s, oldStr, newStr, -1)
e.Errors[i] = strings.ReplaceAll(s, oldStr, newStr)
}
}
return err
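`strings.Replace` with a count of -1 means "replace every occurrence", which is exactly `strings.ReplaceAll`; gocritic's `wrapperFunc` check suggests the dedicated wrapper. A minimal sketch:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	s := "int64 and int64"
	// wrapperFunc: strings.Replace(s, old, new, -1) is flagged in favor of:
	out := strings.ReplaceAll(s, "int64", "uint64")
	fmt.Println(out)
}
```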

@ -249,7 +249,6 @@ func (d *Discovery) refreshData(ctx context.Context) ([]*targetgroup.Group, erro
if detailedIP.Address != ip.String() {
continue
}
switch {
case detailedIP.Public && publicIPv4 == "":
publicIPv4 = detailedIP.Address

@ -84,7 +84,7 @@ func MockDedicatedAPI(w http.ResponseWriter, r *http.Request) {
return
}
w.Header().Set("Content-Type", "application/json")
if string(r.URL.Path) == "/dedicated/server" {
if r.URL.Path == "/dedicated/server" {
dedicatedServersList, err := os.ReadFile("testdata/dedicated_server/dedicated_servers.json")
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
@ -96,7 +96,7 @@ func MockDedicatedAPI(w http.ResponseWriter, r *http.Request) {
return
}
}
if string(r.URL.Path) == "/dedicated/server/abcde" {
if r.URL.Path == "/dedicated/server/abcde" {
dedicatedServer, err := os.ReadFile("testdata/dedicated_server/dedicated_servers_details.json")
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
@ -108,7 +108,7 @@ func MockDedicatedAPI(w http.ResponseWriter, r *http.Request) {
return
}
}
if string(r.URL.Path) == "/dedicated/server/abcde/ips" {
if r.URL.Path == "/dedicated/server/abcde/ips" {
dedicatedServerIPs, err := os.ReadFile("testdata/dedicated_server/dedicated_servers_abcde_ips.json")
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)

@ -91,7 +91,7 @@ func MockVpsAPI(w http.ResponseWriter, r *http.Request) {
return
}
w.Header().Set("Content-Type", "application/json")
if string(r.URL.Path) == "/vps" {
if r.URL.Path == "/vps" {
dedicatedServersList, err := os.ReadFile("testdata/vps/vps.json")
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
@ -103,7 +103,7 @@ func MockVpsAPI(w http.ResponseWriter, r *http.Request) {
return
}
}
if string(r.URL.Path) == "/vps/abc" {
if r.URL.Path == "/vps/abc" {
dedicatedServer, err := os.ReadFile("testdata/vps/vps_details.json")
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
@ -115,7 +115,7 @@ func MockVpsAPI(w http.ResponseWriter, r *http.Request) {
return
}
}
if string(r.URL.Path) == "/vps/abc/ips" {
if r.URL.Path == "/vps/abc/ips" {
dedicatedServerIPs, err := os.ReadFile("testdata/vps/vps_abc_ips.json")
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)

@ -253,7 +253,7 @@ func replaceYAMLTypeError(err error, oldTyp, newTyp reflect.Type) error {
oldStr := oldTyp.String()
newStr := newTyp.String()
for i, s := range e.Errors {
e.Errors[i] = strings.Replace(s, oldStr, newStr, -1)
e.Errors[i] = strings.ReplaceAll(s, oldStr, newStr)
}
}
return err

@ -184,11 +184,11 @@ func (c *Client) buildCommand(q *prompb.Query) (string, error) {
}
func escapeSingleQuotes(str string) string {
return strings.Replace(str, `'`, `\'`, -1)
return strings.ReplaceAll(str, `'`, `\'`)
}
func escapeSlashes(str string) string {
return strings.Replace(str, `/`, `\/`, -1)
return strings.ReplaceAll(str, `/`, `\/`)
}
func mergeResult(labelsToSeries map[string]*prompb.TimeSeries, results []influx.Result) error {

@ -512,7 +512,7 @@ func BenchmarkGzip(b *testing.B) {
k := b.N / promtestdataSampleCount
b.ReportAllocs()
b.SetBytes(int64(n) / promtestdataSampleCount)
b.SetBytes(n / promtestdataSampleCount)
b.ResetTimer()
total := 0

@ -194,9 +194,9 @@ func rangeQueryCases() []benchCase {
if !strings.Contains(c.expr, "X") {
tmp = append(tmp, c)
} else {
tmp = append(tmp, benchCase{expr: strings.Replace(c.expr, "X", "one", -1), steps: c.steps})
tmp = append(tmp, benchCase{expr: strings.Replace(c.expr, "X", "ten", -1), steps: c.steps})
tmp = append(tmp, benchCase{expr: strings.Replace(c.expr, "X", "hundred", -1), steps: c.steps})
tmp = append(tmp, benchCase{expr: strings.ReplaceAll(c.expr, "X", "one"), steps: c.steps})
tmp = append(tmp, benchCase{expr: strings.ReplaceAll(c.expr, "X", "ten"), steps: c.steps})
tmp = append(tmp, benchCase{expr: strings.ReplaceAll(c.expr, "X", "hundred"), steps: c.steps})
}
}
cases = tmp

@ -783,7 +783,6 @@ func (ng *Engine) findMinMaxTime(s *parser.EvalStmt) (int64, int64) {
maxTimestamp = end
}
evalRange = 0
case *parser.MatrixSelector:
evalRange = n.Range
}
@ -816,20 +815,20 @@ func (ng *Engine) getTimeRangesForSelector(s *parser.EvalStmt, n *parser.VectorS
} else {
offsetMilliseconds := durationMilliseconds(subqOffset)
start = start - offsetMilliseconds - durationMilliseconds(subqRange)
end = end - offsetMilliseconds
end -= offsetMilliseconds
}
if evalRange == 0 {
start = start - durationMilliseconds(s.LookbackDelta)
start -= durationMilliseconds(s.LookbackDelta)
} else {
// For all matrix queries we want to ensure that we have (end-start) + range selected
// this way we have `range` data before the start time
start = start - durationMilliseconds(evalRange)
start -= durationMilliseconds(evalRange)
}
offsetMilliseconds := durationMilliseconds(n.OriginalOffset)
start = start - offsetMilliseconds
end = end - offsetMilliseconds
start -= offsetMilliseconds
end -= offsetMilliseconds
return start, end
}
@ -1745,7 +1744,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) {
res, ws := newEv.eval(e.Expr)
ev.currentSamples = newEv.currentSamples
ev.samplesStats.UpdatePeakFromSubquery(newEv.samplesStats)
for ts, step := ev.startTimestamp, -1; ts <= ev.endTimestamp; ts = ts + ev.interval {
for ts, step := ev.startTimestamp, -1; ts <= ev.endTimestamp; ts += ev.interval {
step++
ev.samplesStats.IncrementSamplesAtStep(step, newEv.samplesStats.TotalSamples)
}
@ -1767,7 +1766,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) {
if len(mat[i].Floats)+len(mat[i].Histograms) != 1 {
panic(fmt.Errorf("unexpected number of samples"))
}
for ts := ev.startTimestamp + ev.interval; ts <= ev.endTimestamp; ts = ts + ev.interval {
for ts := ev.startTimestamp + ev.interval; ts <= ev.endTimestamp; ts += ev.interval {
if len(mat[i].Floats) > 0 {
mat[i].Floats = append(mat[i].Floats, FPoint{
T: ts,
@ -2695,7 +2694,7 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without
aggr.floatValue = float64(aggr.groupCount)
case parser.STDVAR:
aggr.floatValue = aggr.floatValue / float64(aggr.groupCount)
aggr.floatValue /= float64(aggr.groupCount)
case parser.STDDEV:
aggr.floatValue = math.Sqrt(aggr.floatValue / float64(aggr.groupCount))

@ -3269,7 +3269,7 @@ func TestNativeHistogram_HistogramCountAndSum(t *testing.T) {
require.Len(t, vector, 1)
require.Nil(t, vector[0].H)
if floatHisto {
require.Equal(t, float64(h.ToFloat().Count), vector[0].F)
require.Equal(t, h.ToFloat().Count, vector[0].F)
} else {
require.Equal(t, float64(h.Count), vector[0].F)
}

@ -880,10 +880,10 @@ func linearRegression(samples []FPoint, interceptTime int64) (slope, intercept f
}
return 0, initY
}
sumX = sumX + cX
sumY = sumY + cY
sumXY = sumXY + cXY
sumX2 = sumX2 + cX2
sumX += cX
sumY += cY
sumXY += cXY
sumX2 += cX2
covXY := sumXY - sumX*sumY/n
varX := sumX2 - sumX*sumX/n

@ -51,7 +51,7 @@ func TestDeriv(t *testing.T) {
// https://github.com/prometheus/prometheus/issues/7180
for i = 0; i < 15; i++ {
jitter := 12 * i % 2
a.Append(0, metric, int64(start+interval*i+jitter), 1)
a.Append(0, metric, start+interval*i+jitter, 1)
}
require.NoError(t, a.Commit())

@ -349,7 +349,7 @@ func (f inspector) Visit(node Node, path []Node) (Visitor, error) {
// for all the non-nil children of node, recursively.
func Inspect(node Node, f inspector) {
//nolint: errcheck
Walk(inspector(f), node, nil)
Walk(f, node, nil)
}
// Children returns a list of all child nodes of a syntax tree node.

@ -3592,7 +3592,7 @@ func TestNaNExpression(t *testing.T) {
nl, ok := expr.(*NumberLiteral)
require.True(t, ok, "expected number literal but got %T", expr)
require.True(t, math.IsNaN(float64(nl.Val)), "expected 'NaN' in number literal but got %v", nl.Val)
require.True(t, math.IsNaN(nl.Val), "expected 'NaN' in number literal but got %v", nl.Val)
}
var testSeries = []struct {

@ -641,7 +641,7 @@ func verifyLabelLimits(lset labels.Labels, limits *labelLimits) error {
met := lset.Get(labels.MetricName)
if limits.labelLimit > 0 {
nbLabels := lset.Len()
if nbLabels > int(limits.labelLimit) {
if nbLabels > limits.labelLimit {
return fmt.Errorf("label_limit exceeded (metric: %.50s, number of labels: %d, limit: %d)", met, nbLabels, limits.labelLimit)
}
}
@ -653,14 +653,14 @@ func verifyLabelLimits(lset labels.Labels, limits *labelLimits) error {
return lset.Validate(func(l labels.Label) error {
if limits.labelNameLengthLimit > 0 {
nameLength := len(l.Name)
if nameLength > int(limits.labelNameLengthLimit) {
if nameLength > limits.labelNameLengthLimit {
return fmt.Errorf("label_name_length_limit exceeded (metric: %.50s, label name: %.50s, length: %d, limit: %d)", met, l.Name, nameLength, limits.labelNameLengthLimit)
}
}
if limits.labelValueLengthLimit > 0 {
valueLength := len(l.Value)
if valueLength > int(limits.labelValueLengthLimit) {
if valueLength > limits.labelValueLengthLimit {
return fmt.Errorf("label_value_length_limit exceeded (metric: %.50s, label name: %.50s, value: %.50q, length: %d, limit: %d)", met, l.Name, l.Value, valueLength, limits.labelValueLengthLimit)
}
}

@ -322,7 +322,7 @@ func TestScrapePoolReloadPreserveRelabeledIntervalTimeout(t *testing.T) {
ScrapeTimeout: model.Duration(2 * time.Second),
}
newLoop := func(opts scrapeLoopOptions) loop {
l := &testLoop{interval: time.Duration(opts.interval), timeout: time.Duration(opts.timeout)}
l := &testLoop{interval: opts.interval, timeout: opts.timeout}
l.startFunc = func(interval, timeout time.Duration, errc chan<- error) {
require.Equal(t, 5*time.Second, interval, "Unexpected scrape interval")
require.Equal(t, 3*time.Second, timeout, "Unexpected scrape timeout")
@ -546,7 +546,7 @@ func TestScrapePoolRaces(t *testing.T) {
require.Equal(t, expectedDropped, len(dropped), "Invalid number of dropped targets")
for i := 0; i < 20; i++ {
time.Sleep(time.Duration(10 * time.Millisecond))
time.Sleep(10 * time.Millisecond)
sp.reload(newConfig())
}
sp.stop()
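The `time.Duration(...)` removals above are another `unconvert` case: `10 * time.Millisecond` is already a `time.Duration`, so wrapping it again is a no-op. A minimal sketch:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// unconvert: time.Sleep(time.Duration(10 * time.Millisecond)) is flagged;
	// an untyped constant times a Duration is already a Duration.
	start := time.Now()
	time.Sleep(10 * time.Millisecond)
	fmt.Println(time.Since(start))
}
```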
@ -1201,7 +1201,6 @@ func TestScrapeLoopRunCreatesStaleMarkersOnParseFailure(t *testing.T) {
// Succeed once, several failures, then stop.
scraper.scrapeFunc = func(ctx context.Context, w io.Writer) error {
numScrapes++
switch numScrapes {
case 1:
w.Write([]byte("metric_a 42\n"))
@ -1286,7 +1285,6 @@ func TestScrapeLoopCache(t *testing.T) {
}
numScrapes++
switch numScrapes {
case 1:
w.Write([]byte("metric_a 42\nmetric_b 43\n"))

@ -413,9 +413,9 @@ func PopulateLabels(lb *labels.Builder, cfg *config.ScrapeConfig, noDefaultPort
// Addresses reaching this point are already wrapped in [] if necessary.
switch scheme {
case "http", "":
addr = addr + ":80"
addr += ":80"
case "https":
addr = addr + ":443"
addr += ":443"
default:
return labels.EmptyLabels(), labels.EmptyLabels(), errors.Errorf("invalid scheme: %q", cfg.Scheme)
}

@ -429,7 +429,6 @@ func (c *concreteSeriesIterator) Seek(t int64) chunkenc.ValueType {
c.histogramsCur += sort.Search(len(c.series.histograms)-c.histogramsCur, func(n int) bool {
return c.series.histograms[n+c.histogramsCur].Timestamp >= t
})
switch {
case c.floatsCur < len(c.series.floats) && c.histogramsCur < len(c.series.histograms):
// If float samples and histogram samples have overlapping timestamps prefer the float samples.
@ -452,7 +451,6 @@ func (c *concreteSeriesIterator) Seek(t int64) chunkenc.ValueType {
case c.histogramsCur < len(c.series.histograms):
c.curValType = getHistogramValType(&c.series.histograms[c.histogramsCur])
}
return c.curValType
}
@ -516,7 +514,6 @@ func (c *concreteSeriesIterator) Next() chunkenc.ValueType {
peekHistTS = c.series.histograms[c.histogramsCur+1].Timestamp
}
c.curValType = chunkenc.ValNone
switch {
case peekFloatTS < peekHistTS:
c.floatsCur++
@ -536,7 +533,6 @@ func (c *concreteSeriesIterator) Next() chunkenc.ValueType {
c.histogramsCur++
c.curValType = chunkenc.ValFloat
}
return c.curValType
}

@ -609,7 +609,7 @@ outer:
t.metrics.enqueueRetriesTotal.Inc()
time.Sleep(time.Duration(backoff))
backoff = backoff * 2
backoff *= 2
// It is reasonable to use t.cfg.MaxBackoff here, as if we have hit
// the full backoff we are likely waiting for external resources.
if backoff > t.cfg.MaxBackoff {
@ -660,7 +660,7 @@ outer:
t.metrics.enqueueRetriesTotal.Inc()
time.Sleep(time.Duration(backoff))
backoff = backoff * 2
backoff *= 2
if backoff > t.cfg.MaxBackoff {
backoff = t.cfg.MaxBackoff
}
@ -707,7 +707,7 @@ outer:
t.metrics.enqueueRetriesTotal.Inc()
time.Sleep(time.Duration(backoff))
backoff = backoff * 2
backoff *= 2
if backoff > t.cfg.MaxBackoff {
backoff = t.cfg.MaxBackoff
}
@ -754,7 +754,7 @@ outer:
t.metrics.enqueueRetriesTotal.Inc()
time.Sleep(time.Duration(backoff))
backoff = backoff * 2
backoff *= 2
if backoff > t.cfg.MaxBackoff {
backoff = t.cfg.MaxBackoff
}

@ -421,7 +421,7 @@ func (te Expander) ExpandHTML(templateFiles []string) (result string, resultErr
}
}
}()
//nolint:unconvert // Before Go 1.19 conversion from text_template to html_template is mandatory
tmpl := html_template.New(te.name).Funcs(html_template.FuncMap(te.funcMap))
tmpl.Option(te.options...)
tmpl.Funcs(html_template.FuncMap{

@ -739,8 +739,7 @@ func TestStorage_DuplicateExemplarsIgnored(t *testing.T) {
var dec record.Decoder
for r.Next() {
rec := r.Record()
switch dec.Type(rec) {
case record.Exemplars:
if dec.Type(rec) == record.Exemplars {
var exemplars []record.RefExemplar
exemplars, err = dec.Exemplars(rec, exemplars)
require.NoError(t, err)
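Even though `singleCaseSwitch` is excluded in the config, this one-case switch was still rewritten by hand into the equivalent `if`. A minimal sketch of the transformation (hypothetical names):

```go
package main

import "fmt"

type recType int

const exemplars recType = 1

func main() {
	rec := exemplars
	// Before: switch rec { case exemplars: ... }
	// After: the if form reads more directly for a single case.
	if rec == exemplars {
		fmt.Println("decode exemplars")
	}
}
```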

@ -630,7 +630,7 @@ func genHistogramSeries(totalSeries, labelCount int, mint, maxt, step int64, flo
{Offset: 0, Length: 2},
{Offset: 1, Length: 2},
},
PositiveBuckets: []int64{int64(ts + 1), 1, -1, 0},
PositiveBuckets: []int64{ts + 1, 1, -1, 0},
}
if ts != mint {
// By setting the counter reset hint to "no counter
@ -669,7 +669,7 @@ func genHistogramAndFloatSeries(totalSeries, labelCount int, mint, maxt, step in
{Offset: 0, Length: 2},
{Offset: 1, Length: 2},
},
PositiveBuckets: []int64{int64(ts + 1), 1, -1, 0},
PositiveBuckets: []int64{ts + 1, 1, -1, 0},
}
if count > 1 && count%5 != 1 {
// Same rationale for this as above in

@ -182,7 +182,7 @@ func (b *bstreamReader) readBits(nbits uint8) (uint64, error) {
}
bitmask = (uint64(1) << nbits) - 1
v = v | ((b.buffer >> (b.valid - nbits)) & bitmask)
v |= ((b.buffer >> (b.valid - nbits)) & bitmask)
b.valid -= nbits
return v, nil
@ -242,13 +242,13 @@ func (b *bstreamReader) loadNextBuffer(nbits uint8) bool {
if b.streamOffset+nbytes == len(b.stream) {
// There can be concurrent writes happening on the very last byte
// of the stream, so use the copy we took at initialization time.
buffer = buffer | uint64(b.last)
buffer |= uint64(b.last)
// Read up to the byte before
skip = 1
}
for i := 0; i < nbytes-skip; i++ {
buffer = buffer | (uint64(b.stream[b.streamOffset+i]) << uint(8*(nbytes-i-1)))
buffer |= (uint64(b.stream[b.streamOffset+i]) << uint(8*(nbytes-i-1)))
}
b.buffer = buffer

@ -785,7 +785,7 @@ func (it *floatHistogramIterator) Next() ValueType {
it.err = err
return ValNone
}
it.tDelta = it.tDelta + tDod
it.tDelta += tDod
it.t += it.tDelta
if ok := it.readXor(&it.cnt.value, &it.cnt.leading, &it.cnt.trailing); !ok {

@ -875,7 +875,7 @@ func (it *histogramIterator) Next() ValueType {
it.err = err
return ValNone
}
it.tDelta = it.tDelta + tDod
it.tDelta += tDod
it.t += it.tDelta
cntDod, err := readVarbitInt(&it.br)
@ -883,7 +883,7 @@ func (it *histogramIterator) Next() ValueType {
it.err = err
return ValNone
}
it.cntDelta = it.cntDelta + cntDod
it.cntDelta += cntDod
it.cnt = uint64(int64(it.cnt) + it.cntDelta)
zcntDod, err := readVarbitInt(&it.br)
@ -891,7 +891,7 @@ func (it *histogramIterator) Next() ValueType {
it.err = err
return ValNone
}
it.zCntDelta = it.zCntDelta + zcntDod
it.zCntDelta += zcntDod
it.zCnt = uint64(int64(it.zCnt) + it.zCntDelta)
ok := it.readSum()

@ -122,7 +122,7 @@ func readVarbitInt(b *bstreamReader) (int64, error) {
}
if bits > (1 << (sz - 1)) {
// Or something.
bits = bits - (1 << sz)
bits -= (1 << sz)
}
val = int64(bits)
}

@ -163,7 +163,6 @@ func (a *xorAppender) AppendFloatHistogram(int64, *histogram.FloatHistogram) {
func (a *xorAppender) Append(t int64, v float64) {
var tDelta uint64
num := binary.BigEndian.Uint16(a.b.bytes())
switch num {
case 0:
buf := make([]byte, binary.MaxVarintLen64)
@ -171,7 +170,6 @@ func (a *xorAppender) Append(t int64, v float64) {
a.b.writeByte(b)
}
a.b.writeBits(math.Float64bits(v), 64)
case 1:
tDelta = uint64(t - a.t)
@ -322,7 +320,7 @@ func (it *xorIterator) Next() ValueType {
return ValNone
}
it.tDelta = tDelta
it.t = it.t + int64(it.tDelta)
it.t += int64(it.tDelta)
return it.readValue()
}
@ -385,7 +383,7 @@ func (it *xorIterator) Next() ValueType {
}
it.tDelta = uint64(int64(it.tDelta) + dod)
it.t = it.t + int64(it.tDelta)
it.t += int64(it.tDelta)
return it.readValue()
}

@ -44,7 +44,7 @@ func ExponentialBlockRanges(minSize int64, steps, stepSize int) []int64 {
curRange := minSize
for i := 0; i < steps; i++ {
ranges = append(ranges, curRange)
curRange = curRange * int64(stepSize)
curRange *= int64(stepSize)
}
return ranges

@ -1452,12 +1452,6 @@ func TestSparseHistogramSpaceSavings(t *testing.T) {
{100, 15, 3, 5},
{100, 50, 3, 3},
{100, 100, 3, 2},
//{1000, 15, 1, 0},
//{1000, 50, 1, 0},
//{1000, 100, 1, 0},
//{1000, 15, 3, 5},
//{1000, 50, 3, 3},
//{1000, 100, 3, 2},
}
type testSummary struct {

@ -260,7 +260,7 @@ func newDBMetrics(db *DB, r prometheus.Registerer) *dbMetrics {
Help: "Size of symbol table in memory for loaded blocks",
}, func() float64 {
db.mtx.RLock()
blocks := db.blocks[:]
blocks := db.blocks
db.mtx.RUnlock()
symTblSize := uint64(0)
for _, b := range blocks {
@ -1187,7 +1187,7 @@ func (db *DB) compactOOO(dest string, oooHead *OOOCompactionHead) (_ []ulid.ULID
}
}()
for t := blockSize * (oooHeadMint / blockSize); t <= oooHeadMaxt; t = t + blockSize {
for t := blockSize * (oooHeadMint / blockSize); t <= oooHeadMaxt; t += blockSize {
mint, maxt := t, t+blockSize
// Block intervals are half-open: [b.MinTime, b.MaxTime). Block intervals are always +1 than the total samples it includes.
uid, err := db.compactor.Write(dest, oooHead.CloneForTimeRange(mint, maxt-1), mint, maxt, nil)
@ -1509,7 +1509,7 @@ func BeyondSizeRetention(db *DB, blocks []*Block) (deletable map[ulid.ULID]struc
blocksSize := db.Head().Size()
for i, block := range blocks {
blocksSize += block.Size()
if blocksSize > int64(db.opts.MaxBytes) {
if blocksSize > db.opts.MaxBytes {
// Add this and all following blocks for deletion.
for _, b := range blocks[i:] {
deletable[b.meta.ULID] = struct{}{}

@ -1076,7 +1076,7 @@ func TestWALSegmentSizeOptions(t *testing.T) {
dbDir := db.Dir()
require.NoError(t, db.Close())
testFunc(dbDir, int(opts.WALSegmentSize))
testFunc(dbDir, opts.WALSegmentSize)
})
}
}
@ -2996,7 +2996,7 @@ func TestCompactHead(t *testing.T) {
series = seriesSet.At().Iterator(series)
for series.Next() == chunkenc.ValFloat {
time, val := series.At()
actSamples = append(actSamples, sample{int64(time), val, nil, nil})
actSamples = append(actSamples, sample{time, val, nil, nil})
}
require.NoError(t, series.Err())
}

@ -151,7 +151,7 @@ func (ce *CircularExemplarStorage) Querier(_ context.Context) (storage.ExemplarQ
func (ce *CircularExemplarStorage) Select(start, end int64, matchers ...[]*labels.Matcher) ([]exemplar.QueryResult, error) {
ret := make([]exemplar.QueryResult, 0)
if len(ce.exemplars) <= 0 {
if len(ce.exemplars) == 0 {
return ret, nil
}
@ -219,7 +219,7 @@ func (ce *CircularExemplarStorage) ValidateExemplar(l labels.Labels, e exemplar.
// Not thread safe. The append parameters tells us whether this is an external validation, or internal
// as a result of an AddExemplar call, in which case we should update any relevant metrics.
func (ce *CircularExemplarStorage) validateExemplar(key []byte, e exemplar.Exemplar, append bool) error {
if len(ce.exemplars) <= 0 {
if len(ce.exemplars) == 0 {
return storage.ErrExemplarsDisabled
}
@ -334,7 +334,7 @@ func (ce *CircularExemplarStorage) migrate(entry *circularBufferEntry) {
}
func (ce *CircularExemplarStorage) AddExemplar(l labels.Labels, e exemplar.Exemplar) error {
if len(ce.exemplars) <= 0 {
if len(ce.exemplars) == 0 {
return storage.ErrExemplarsDisabled
}

@ -1453,7 +1453,7 @@ func (h *Head) Delete(mint, maxt int64, ms ...*labels.Matcher) error {
}
}
for _, s := range stones {
h.tombstones.AddInterval(storage.SeriesRef(s.Ref), s.Intervals[0])
h.tombstones.AddInterval(s.Ref, s.Intervals[0])
}
return nil

@ -3007,7 +3007,7 @@ func TestHistogramInWALAndMmapChunk(t *testing.T) {
hists = tsdbutil.GenerateTestHistograms(numHistograms)
}
for _, h := range hists {
h.Count = h.Count * 2
h.Count *= 2
h.NegativeSpans = h.PositiveSpans
h.NegativeBuckets = h.PositiveBuckets
_, err := app.AppendHistogram(0, s1, ts, h, nil)
@ -3030,7 +3030,7 @@ func TestHistogramInWALAndMmapChunk(t *testing.T) {
hists = tsdbutil.GenerateTestFloatHistograms(numHistograms)
}
for _, h := range hists {
h.Count = h.Count * 2
h.Count *= 2
h.NegativeSpans = h.PositiveSpans
h.NegativeBuckets = h.PositiveBuckets
_, err := app.AppendHistogram(0, s1, ts, nil, h)
@ -3071,26 +3071,26 @@ func TestHistogramInWALAndMmapChunk(t *testing.T) {
}
for _, h := range hists {
ts++
h.Count = h.Count * 2
h.Count *= 2
h.NegativeSpans = h.PositiveSpans
h.NegativeBuckets = h.PositiveBuckets
_, err := app.AppendHistogram(0, s2, int64(ts), h, nil)
_, err := app.AppendHistogram(0, s2, ts, h, nil)
require.NoError(t, err)
eh := h.Copy()
if !gauge && ts > 30 && (ts-10)%20 == 1 {
// Need "unknown" hint after float sample.
eh.CounterResetHint = histogram.UnknownCounterReset
}
exp[k2] = append(exp[k2], sample{t: int64(ts), h: eh})
exp[k2] = append(exp[k2], sample{t: ts, h: eh})
if ts%20 == 0 {
require.NoError(t, app.Commit())
app = head.Appender(context.Background())
// Add some float.
for i := 0; i < 10; i++ {
ts++
_, err := app.Append(0, s2, int64(ts), float64(ts))
_, err := app.Append(0, s2, ts, float64(ts))
require.NoError(t, err)
exp[k2] = append(exp[k2], sample{t: int64(ts), f: float64(ts)})
exp[k2] = append(exp[k2], sample{t: ts, f: float64(ts)})
}
require.NoError(t, app.Commit())
app = head.Appender(context.Background())
@ -3108,26 +3108,26 @@ func TestHistogramInWALAndMmapChunk(t *testing.T) {
}
for _, h := range hists {
ts++
h.Count = h.Count * 2
h.Count *= 2
h.NegativeSpans = h.PositiveSpans
h.NegativeBuckets = h.PositiveBuckets
_, err := app.AppendHistogram(0, s2, int64(ts), nil, h)
_, err := app.AppendHistogram(0, s2, ts, nil, h)
require.NoError(t, err)
eh := h.Copy()
if !gauge && ts > 30 && (ts-10)%20 == 1 {
// Need "unknown" hint after float sample.
eh.CounterResetHint = histogram.UnknownCounterReset
}
exp[k2] = append(exp[k2], sample{t: int64(ts), fh: eh})
exp[k2] = append(exp[k2], sample{t: ts, fh: eh})
if ts%20 == 0 {
require.NoError(t, app.Commit())
app = head.Appender(context.Background())
// Add some float.
for i := 0; i < 10; i++ {
ts++
_, err := app.Append(0, s2, int64(ts), float64(ts))
_, err := app.Append(0, s2, ts, float64(ts))
require.NoError(t, err)
exp[k2] = append(exp[k2], sample{t: int64(ts), f: float64(ts)})
exp[k2] = append(exp[k2], sample{t: ts, f: float64(ts)})
}
require.NoError(t, app.Commit())
app = head.Appender(context.Background())
@ -4497,7 +4497,6 @@ func TestHistogramValidation(t *testing.T) {
default:
require.NoError(t, err)
}
switch err := ValidateFloatHistogram(tc.h.ToFloat()); {
case tc.errMsgFloat != "":
require.ErrorContains(t, err, tc.errMsgFloat)

@ -300,7 +300,7 @@ Outer:
unknownRefs.Inc()
continue
}
h.tombstones.AddInterval(storage.SeriesRef(s.Ref), itv)
h.tombstones.AddInterval(s.Ref, itv)
}
}
tstonesPool.Put(v)
@ -383,7 +383,7 @@ Outer:
floatHistogramsPool.Put(v)
case []record.RefMetadata:
for _, m := range v {
s := h.series.getByID(chunks.HeadSeriesRef(m.Ref))
s := h.series.getByID(m.Ref)
if s == nil {
unknownMetadataRefs.Inc()
continue

@ -536,7 +536,7 @@ func (w *Writer) finishSymbols() error {
// Write out the length and symbol count.
w.buf1.Reset()
w.buf1.PutBE32int(int(symbolTableSize))
w.buf1.PutBE32int(int(w.numSymbols))
w.buf1.PutBE32int(w.numSymbols)
if err := w.writeAt(w.buf1.Get(), w.toc.Symbols); err != nil {
return err
}

@ -702,7 +702,6 @@ func (rp *removedPostings) Next() bool {
rp.fok = rp.full.Next()
return true
}
switch fcur, rcur := rp.full.At(), rp.remove.At(); {
case fcur < rcur:
rp.cur = fcur

@ -31,10 +31,10 @@ type maxHeap struct {
Items []Stat
}
func (m *maxHeap) init(len int) {
m.maxLength = len
func (m *maxHeap) init(length int) {
m.maxLength = length
m.minValue = math.MaxUint64
m.Items = make([]Stat, 0, len)
m.Items = make([]Stat, 0, length)
}
func (m *maxHeap) push(item Stat) {
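Renaming the `len` parameter above matters because it shadows the builtin `len` inside the function body; gocritic has a `builtinShadow` check for this pattern (not in its default set, so this rename may simply be readability cleanup). A minimal sketch with hypothetical names:

```go
package main

import "fmt"

// With a parameter named `len`, the builtin would be unreachable in the body.
func firstN(length int, items []int) []int {
	if length > len(items) { // builtin len still usable
		length = len(items)
	}
	return items[:length]
}

func main() { fmt.Println(firstN(2, []int{3, 1, 2})) }
```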

@ -254,7 +254,7 @@ func (txr *txRing) add(appendID uint64) {
if txr.txIDCount == len(txr.txIDs) {
// Ring buffer is full, expand by doubling.
newRing := make([]uint64, txr.txIDCount*2)
idx := copy(newRing[:], txr.txIDs[txr.txIDFirst:])
idx := copy(newRing, txr.txIDs[txr.txIDFirst:])
copy(newRing[idx:], txr.txIDs[:txr.txIDFirst])
txr.txIDs = newRing
txr.txIDFirst = 0
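`newRing[:]` of something that is already a slice is a no-op, so gocritic's `unslice` check suggests dropping it (the earlier `db.blocks[:]` change is the same fix). A minimal sketch:

```go
package main

import "fmt"

func main() {
	ring := []uint64{1, 2, 3}
	newRing := make([]uint64, len(ring)*2)
	// unslice: copy(newRing[:], ring) is flagged; the [:] adds nothing.
	idx := copy(newRing, ring)
	fmt.Println(idx, newRing)
}
```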

@ -967,7 +967,6 @@ func (m *mergedStringIter) Next() bool {
if (!m.aok && !m.bok) || (m.Err() != nil) {
return false
}
switch {
case !m.aok:
m.cur = m.b.At()

@ -114,7 +114,7 @@ func createIdxChkReaders(t *testing.T, tc []seriesSamples) (IndexReader, ChunkRe
var chunkRef chunks.ChunkRef
for i, s := range tc {
i = i + 1 // 0 is not a valid posting.
i++ // 0 is not a valid posting.
metas := make([]chunks.Meta, 0, len(s.chunks))
for _, chk := range s.chunks {
if chk[0].t < blockMint {
@ -2013,7 +2013,7 @@ func BenchmarkQueries(b *testing.B) {
for x := 0; x <= 10; x++ {
block, err := OpenBlock(nil, createBlock(b, dir, series), nil)
require.NoError(b, err)
q, err := NewBlockQuerier(block, 1, int64(nSamples))
q, err := NewBlockQuerier(block, 1, nSamples)
require.NoError(b, err)
qs = append(qs, q)
}

@ -91,7 +91,7 @@ func newWalMetrics(r prometheus.Registerer) *walMetrics {
// WAL is a write ahead log that can log new series labels and samples.
// It must be completely read before new entries are logged.
//
// DEPRECATED: use wlog pkg combined with the record codex instead.
// Deprecated: use wlog pkg combined with the record codex instead.
type WAL interface {
Reader() WALReader
LogSeries([]record.RefSeries) error
@ -148,7 +148,7 @@ func newCRC32() hash.Hash32 {
// SegmentWAL is a write ahead log for series data.
//
// DEPRECATED: use wlog pkg combined with the record coders instead.
// Deprecated: use wlog pkg combined with the record coders instead.
type SegmentWAL struct {
mtx sync.Mutex
metrics *walMetrics
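The capitalization fix is gocritic's `deprecatedComment` check: godoc, gopls, and staticcheck only recognize the exact `Deprecated:` prefix, so `DEPRECATED:` is invisible to tooling. A minimal sketch:

```go
package main

import "fmt"

// deprecatedComment flags misspelled markers such as "DEPRECATED: ..." because
// godoc and friends only match the exact prefix below.

// Deprecated: use NewWriter instead.
func OldWriter() { fmt.Println("old") }

func NewWriter() { fmt.Println("new") }

func main() { NewWriter() }
```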

@ -39,7 +39,7 @@ func getLimits(resource int, unit string) string {
if err != nil {
panic("syscall.Getrlimit failed: " + err.Error())
}
return fmt.Sprintf("(soft=%s, hard=%s)", limitToString(uint64(rlimit.Cur), unit), limitToString(uint64(rlimit.Max), unit))
return fmt.Sprintf("(soft=%s, hard=%s)", limitToString(rlimit.Cur, unit), limitToString(rlimit.Max, unit))
}
// FdLimits returns the soft and hard limits for file descriptors.

@ -72,11 +72,13 @@ func Statfs(path string) string {
var fs syscall.Statfs_t
err := syscall.Statfs(path, &fs)
//nolint:unconvert // This ensure Type format on all Platforms
localType := int64(fs.Type)
if err != nil {
return strconv.FormatInt(int64(fs.Type), 16)
return strconv.FormatInt(localType, 16)
}
if fsType, ok := fsTypes[int64(fs.Type)]; ok {
if fsType, ok := fsTypes[localType]; ok {
return fsType
}
return strconv.FormatInt(int64(fs.Type), 16)
return strconv.FormatInt(localType, 16)
}
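The `//nolint:unconvert` above records the opposite case: a conversion that is redundant on this platform but required on others, since `Statfs_t.Type` differs in width and signedness across OSes. A minimal sketch, assuming a platform where the field is already `int64`:

```go
package main

import (
	"fmt"
	"strconv"
)

type statfsT struct{ Type int64 } // other platforms declare int32/uint32 here

func fsTypeHex(fs statfsT) string {
	//nolint:unconvert // redundant here, but mandatory where Type is not int64
	localType := int64(fs.Type)
	return strconv.FormatInt(localType, 16)
}

func main() { fmt.Println(fsTypeHex(statfsT{Type: 0x9123683e})) }
```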

@ -243,7 +243,7 @@ func NewAPI(
remoteReadConcurrencyLimit int,
remoteReadMaxBytesInFrame int,
isAgent bool,
CORSOrigin *regexp.Regexp,
corsOrigin *regexp.Regexp,
runtimeInfo func() (RuntimeInfo, error),
buildInfo *PrometheusVersion,
gatherer prometheus.Gatherer,
@ -269,7 +269,7 @@ func NewAPI(
enableAdmin: enableAdmin,
rulesRetriever: rr,
logger: logger,
CORSOrigin: CORSOrigin,
CORSOrigin: corsOrigin,
runtimeInfo: runtimeInfo,
buildInfo: buildInfo,
gatherer: gatherer,
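`CORSOrigin` to `corsOrigin` is gocritic's `captLocal` check: parameters and other local identifiers should not start with a capital letter, which Go reserves for exported names. A minimal sketch with a simplified signature:

```go
package main

import (
	"fmt"
	"regexp"
)

// captLocal would flag `func NewAPI(CORSOrigin *regexp.Regexp)`.
func NewAPI(corsOrigin *regexp.Regexp) {
	fmt.Println(corsOrigin.String())
}

func main() {
	NewAPI(regexp.MustCompile(`^https://example\.org$`))
}
```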

@ -2850,7 +2850,7 @@ func TestRespondSuccess(t *testing.T) {
}
var res response
if err = json.Unmarshal([]byte(body), &res); err != nil {
if err = json.Unmarshal(body, &res); err != nil {
t.Fatalf("Error unmarshaling JSON body: %s", err)
}
@ -2886,7 +2886,7 @@ func TestRespondError(t *testing.T) {
}
var res response
if err = json.Unmarshal([]byte(body), &res); err != nil {
if err = json.Unmarshal(body, &res); err != nil {
t.Fatalf("Error unmarshaling JSON body: %s", err)
}

@ -719,9 +719,9 @@ func (h *Handler) runtimeInfo() (api_v1.RuntimeInfo, error) {
}
if h.options.TSDBMaxBytes != 0 {
if status.StorageRetention != "" {
status.StorageRetention = status.StorageRetention + " or "
status.StorageRetention += " or "
}
status.StorageRetention = status.StorageRetention + h.options.TSDBMaxBytes.String()
status.StorageRetention += h.options.TSDBMaxBytes.String()
}
metrics, err := h.gatherer.Gather()