lint: Adjust to the lint warnings raised by current versions of golangci-lint

We haven't updated golangci-lint in our CI yet, but this commit prepares
for that.

There are a lot of new warnings, mostly because the "revive" linter got
updated. I agree with most of them. The bulk are about not naming unused
function parameters: naming such a parameter is still justified in some
cases for documentation purposes, while things like mocks are a good
example where leaving the parameter unnamed is clearer.

I'm pretty unhappy that the "empty block" warning now covers `for`
loops. It's such a common pattern to do all the work in the head of the
`for` loop and leave the block empty. There is still an open issue
about this: https://github.com/mgechev/revive/issues/810. I have
disabled "revive" altogether in files where empty blocks are used
extensively, and I have made the effort to add individual
`// nolint:revive` comments where empty blocks are used just once or
twice. It's borderline noisy, but let's go with it for now.

I should mention that none of the "empty block" warnings for `for`
loop bodies were legitimate.
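
For readers who haven't hit this warning: the pattern in question looks like the hypothetical sketch below (not code from this repository). All the work happens in the head of the `for` loop, so the body is intentionally empty, and the per-site escape hatch is a `// nolint:revive` comment on the `for` line:

```go
package main

import "fmt"

// drain advances an iterator whose next function does all the work, so
// the loop body is intentionally empty. Current revive flags this as an
// "empty block", hence the suppression on the for line.
func drain(next func() bool) {
	for next() { // nolint:revive
	}
}

func main() {
	i := 0
	drain(func() bool {
		i++
		return i < 5
	})
	fmt.Println("iterator advanced", i, "times")
}
```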

Signed-off-by: beorn7 <beorn@grafana.com>
beorn7 2023-04-12 13:05:41 +02:00
parent b028112331
commit c3c7d44d84
69 changed files with 145 additions and 150 deletions

@@ -12,6 +12,7 @@
 // limitations under the License.

 // The main package for the Prometheus server executable.
+// nolint:revive // Many unused function arguments in this file by design.
 package main

 import (

@@ -44,7 +44,7 @@ func sortSamples(samples []backfillSample) {
 	})
 }

-func queryAllSeries(t testing.TB, q storage.Querier, expectedMinTime, expectedMaxTime int64) []backfillSample {
+func queryAllSeries(t testing.TB, q storage.Querier, expectedMinTime, expectedMaxTime int64) []backfillSample { // nolint:revive
 	ss := q.Select(false, nil, labels.MustNewMatcher(labels.MatchRegexp, "", ".*"))
 	samples := []backfillSample{}
 	for ss.Next() {

@@ -68,7 +68,7 @@ func newRuleImporter(logger log.Logger, config ruleImporterConfig, apiClient que
 }

 // loadGroups parses groups from a list of recording rule files.
-func (importer *ruleImporter) loadGroups(ctx context.Context, filenames []string) (errs []error) {
+func (importer *ruleImporter) loadGroups(_ context.Context, filenames []string) (errs []error) {
 	groups, errs := importer.ruleManager.LoadGroups(importer.config.evalInterval, labels.Labels{}, "", nil, filenames...)
 	if errs != nil {
 		return errs

@@ -35,7 +35,7 @@ type mockQueryRangeAPI struct {
 	samples model.Matrix
 }

-func (mockAPI mockQueryRangeAPI) QueryRange(ctx context.Context, query string, r v1.Range, opts ...v1.Option) (model.Value, v1.Warnings, error) {
+func (mockAPI mockQueryRangeAPI) QueryRange(_ context.Context, query string, r v1.Range, opts ...v1.Option) (model.Value, v1.Warnings, error) { // nolint:revive
 	return mockAPI.samples, v1.Warnings{}, nil
 }
@@ -161,7 +161,7 @@ func TestBackfillRuleIntegration(t *testing.T) {
 	}
 }

-func newTestRuleImporter(ctx context.Context, start time.Time, tmpDir string, testSamples model.Matrix, maxBlockDuration time.Duration) (*ruleImporter, error) {
+func newTestRuleImporter(_ context.Context, start time.Time, tmpDir string, testSamples model.Matrix, maxBlockDuration time.Duration) (*ruleImporter, error) {
 	logger := log.NewNopLogger()
 	cfg := ruleImporterConfig{
 		outputDir: tmpDir,

@@ -164,7 +164,7 @@ func NewEC2Discovery(conf *EC2SDConfig, logger log.Logger) *EC2Discovery {
 	return d
 }

-func (d *EC2Discovery) ec2Client(ctx context.Context) (*ec2.EC2, error) {
+func (d *EC2Discovery) ec2Client(context.Context) (*ec2.EC2, error) {
 	if d.ec2 != nil {
 		return d.ec2, nil
 	}

@@ -59,7 +59,7 @@ type hcloudDiscovery struct {
 }

 // newHcloudDiscovery returns a new hcloudDiscovery which periodically refreshes its targets.
-func newHcloudDiscovery(conf *SDConfig, logger log.Logger) (*hcloudDiscovery, error) {
+func newHcloudDiscovery(conf *SDConfig, _ log.Logger) (*hcloudDiscovery, error) {
 	d := &hcloudDiscovery{
 		port: conf.Port,
 	}

@@ -51,7 +51,7 @@ type robotDiscovery struct {
 }

 // newRobotDiscovery returns a new robotDiscovery which periodically refreshes its targets.
-func newRobotDiscovery(conf *SDConfig, logger log.Logger) (*robotDiscovery, error) {
+func newRobotDiscovery(conf *SDConfig, _ log.Logger) (*robotDiscovery, error) {
 	d := &robotDiscovery{
 		port:     conf.Port,
 		endpoint: conf.robotEndpoint,
@@ -69,7 +69,7 @@ func newRobotDiscovery(conf *SDConfig, logger log.Logger) (*robotDiscovery, erro
 	return d, nil
 }

-func (d *robotDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
+func (d *robotDiscovery) refresh(context.Context) ([]*targetgroup.Group, error) {
 	req, err := http.NewRequest("GET", d.endpoint+"/server", nil)
 	if err != nil {
 		return nil, err

@@ -60,7 +60,7 @@ type serverDiscovery struct {
 	datacenterID string
 }

-func newServerDiscovery(conf *SDConfig, logger log.Logger) (*serverDiscovery, error) {
+func newServerDiscovery(conf *SDConfig, _ log.Logger) (*serverDiscovery, error) {
 	d := &serverDiscovery{
 		port:         conf.Port,
 		datacenterID: conf.DatacenterID,

@@ -122,11 +122,11 @@ func (f *clientGoRequestMetricAdapter) Register(registerer prometheus.Registerer
 	)
 }

-func (clientGoRequestMetricAdapter) Increment(ctx context.Context, code, method, host string) {
+func (clientGoRequestMetricAdapter) Increment(_ context.Context, code, _, _ string) {
 	clientGoRequestResultMetricVec.WithLabelValues(code).Inc()
 }

-func (clientGoRequestMetricAdapter) Observe(ctx context.Context, verb string, u url.URL, latency time.Duration) {
+func (clientGoRequestMetricAdapter) Observe(_ context.Context, _ string, u url.URL, latency time.Duration) {
 	clientGoRequestLatencyMetricVec.WithLabelValues(u.EscapedPath()).Observe(latency.Seconds())
 }
@@ -169,7 +169,7 @@ func (f *clientGoWorkqueueMetricsProvider) NewLongestRunningProcessorSecondsMetr
 	return clientGoWorkqueueLongestRunningProcessorMetricVec.WithLabelValues(name)
 }

-func (clientGoWorkqueueMetricsProvider) NewRetriesMetric(name string) workqueue.CounterMetric {
+func (clientGoWorkqueueMetricsProvider) NewRetriesMetric(string) workqueue.CounterMetric {
 	// Retries are not used so the metric is omitted.
 	return noopMetric{}
 }

@@ -11,6 +11,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

+// nolint:revive // Many legitimately empty blocks in this file.
 package kubernetes

 import (

@@ -190,7 +190,7 @@ func (e *EndpointSlice) Run(ctx context.Context, ch chan<- []*targetgroup.Group)
 	}

 	go func() {
-		for e.process(ctx, ch) {
+		for e.process(ctx, ch) { // nolint:revive
 		}
 	}()

@@ -89,7 +89,7 @@ func (i *Ingress) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
 	}

 	go func() {
-		for i.process(ctx, ch) {
+		for i.process(ctx, ch) { // nolint:revive
 		}
 	}()

@@ -96,7 +96,7 @@ func (n *Node) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
 	}

 	go func() {
-		for n.process(ctx, ch) {
+		for n.process(ctx, ch) { // nolint:revive
 		}
 	}()

@@ -132,7 +132,7 @@ func (p *Pod) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
 	}

 	go func() {
-		for p.process(ctx, ch) {
+		for p.process(ctx, ch) { // nolint:revive
 		}
 	}()

@@ -92,7 +92,7 @@ func (s *Service) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
 	}

 	go func() {
-		for s.process(ctx, ch) {
+		for s.process(ctx, ch) { // nolint:revive
 		}
 	}()

@@ -686,12 +686,7 @@ func TestTargetUpdatesOrder(t *testing.T) {
 			case tgs := <-provUpdates:
 				discoveryManager.updateGroup(poolKey{setName: strconv.Itoa(i), provider: tc.title}, tgs)
 				for _, got := range discoveryManager.allGroups() {
-					assertEqualGroups(t, got, tc.expectedTargets[x], func(got, expected string) string {
-						return fmt.Sprintf("%d: \ntargets mismatch \ngot: %v \nexpected: %v",
-							x,
-							got,
-							expected)
-					})
+					assertEqualGroups(t, got, tc.expectedTargets[x])
 				}
 			}
 		}
@@ -699,7 +694,7 @@ func TestTargetUpdatesOrder(t *testing.T) {
 	}
 }

-func assertEqualGroups(t *testing.T, got, expected []*targetgroup.Group, msg func(got, expected string) string) {
+func assertEqualGroups(t *testing.T, got, expected []*targetgroup.Group) {
 	t.Helper()

 	// Need to sort by the groups's source as the received order is not guaranteed.
@@ -1079,9 +1074,7 @@ func TestCoordinationWithReceiver(t *testing.T) {
 					if _, ok := tgs[k]; !ok {
 						t.Fatalf("step %d: target group not found: %s\ngot: %#v", i, k, tgs)
 					}
-					assertEqualGroups(t, tgs[k], expected.tgs[k], func(got, expected string) string {
-						return fmt.Sprintf("step %d: targets mismatch \ngot: %q \nexpected: %q", i, got, expected)
-					})
+					assertEqualGroups(t, tgs[k], expected.tgs[k])
 				}
 			}
 		}

@@ -686,12 +686,7 @@ func TestTargetUpdatesOrder(t *testing.T) {
 			case tgs := <-provUpdates:
 				discoveryManager.updateGroup(poolKey{setName: strconv.Itoa(i), provider: tc.title}, tgs)
 				for _, got := range discoveryManager.allGroups() {
-					assertEqualGroups(t, got, tc.expectedTargets[x], func(got, expected string) string {
-						return fmt.Sprintf("%d: \ntargets mismatch \ngot: %v \nexpected: %v",
-							x,
-							got,
-							expected)
-					})
+					assertEqualGroups(t, got, tc.expectedTargets[x])
 				}
 			}
 		}
@@ -699,7 +694,7 @@ func TestTargetUpdatesOrder(t *testing.T) {
 	}
 }

-func assertEqualGroups(t *testing.T, got, expected []*targetgroup.Group, msg func(got, expected string) string) {
+func assertEqualGroups(t *testing.T, got, expected []*targetgroup.Group) {
 	t.Helper()

 	// Need to sort by the groups's source as the received order is not guaranteed.
@@ -1129,7 +1124,7 @@ type lockStaticConfig struct {
 }

 func (s lockStaticConfig) Name() string { return "lockstatic" }
-func (s lockStaticConfig) NewDiscoverer(options DiscovererOptions) (Discoverer, error) {
+func (s lockStaticConfig) NewDiscoverer(DiscovererOptions) (Discoverer, error) {
 	return (lockStaticDiscoverer)(s), nil
 }
@@ -1330,9 +1325,7 @@ func TestCoordinationWithReceiver(t *testing.T) {
 					if _, ok := tgs[k]; !ok {
 						t.Fatalf("step %d: target group not found: %s\ngot: %#v", i, k, tgs)
 					}
-					assertEqualGroups(t, tgs[k], expected.tgs[k], func(got, expected string) string {
-						return fmt.Sprintf("step %d: targets mismatch \ngot: %q \nexpected: %q", i, got, expected)
-					})
+					assertEqualGroups(t, tgs[k], expected.tgs[k])
 				}
 			}
 		}
@@ -1399,7 +1392,7 @@ func (o onceProvider) Run(_ context.Context, ch chan<- []*targetgroup.Group) {
 // TestTargetSetTargetGroupsUpdateDuringApplyConfig is used to detect races when
 // ApplyConfig happens at the same time as targets update.
-func TestTargetSetTargetGroupsUpdateDuringApplyConfig(t *testing.T) {
+func TestTargetSetTargetGroupsUpdateDuringApplyConfig(*testing.T) {
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()

 	discoveryManager := NewManager(ctx, log.NewNopLogger())

@@ -161,7 +161,7 @@ func NewDiscovery(conf *SDConfig, logger log.Logger) (*Discovery, error) {
 	return d, nil
 }

-func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
+func (d *Discovery) refresh(context.Context) ([]*targetgroup.Group, error) {
 	opts := &nomad.QueryOptions{
 		AllowStale: d.allowStale,
 	}

@@ -102,7 +102,7 @@ func (d *dedicatedServerDiscovery) getSource() string {
 	return fmt.Sprintf("%s_%s", d.config.Name(), d.getService())
 }

-func (d *dedicatedServerDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
+func (d *dedicatedServerDiscovery) refresh(context.Context) ([]*targetgroup.Group, error) {
 	client, err := createClient(d.config)
 	if err != nil {
 		return nil, err

@@ -117,7 +117,7 @@ func (d *vpsDiscovery) getSource() string {
 	return fmt.Sprintf("%s_%s", d.config.Name(), d.getService())
 }

-func (d *vpsDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
+func (d *vpsDiscovery) refresh(context.Context) ([]*targetgroup.Group, error) {
 	client, err := createClient(d.config)
 	if err != nil {
 		return nil, err

@@ -202,10 +202,8 @@ func (d *Discovery) listInstances(ctx context.Context) ([]govultr.Instance, erro
 		if meta.Links.Next == "" {
 			break
-		} else {
-			listOptions.Cursor = meta.Links.Next
-			continue
 		}
+		listOptions.Cursor = meta.Links.Next
 	}

 	return instances, nil

@@ -193,7 +193,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
 	}
 	for _, pathUpdate := range d.pathUpdates {
 		// Drain event channel in case the treecache leaks goroutines otherwise.
-		for range pathUpdate {
+		for range pathUpdate { // nolint:revive
 		}
 	}
 	d.conn.Close()

@@ -238,9 +238,10 @@ func (p *PromParser) Metric(l *labels.Labels) string {
 	return s
 }

-// Exemplar writes the exemplar of the current sample into the passed
-// exemplar. It returns if an exemplar exists.
-func (p *PromParser) Exemplar(e *exemplar.Exemplar) bool {
+// Exemplar implements the Parser interface. However, since the classic
+// Prometheus text format does not support exemplars, this implementation simply
+// returns false and does nothing else.
+func (p *PromParser) Exemplar(*exemplar.Exemplar) bool {
 	return false
 }

@@ -27,7 +27,7 @@ import (
 	"github.com/prometheus/prometheus/util/teststorage"
 )

-func setupRangeQueryTestData(stor *teststorage.TestStorage, engine *Engine, interval, numIntervals int) error {
+func setupRangeQueryTestData(stor *teststorage.TestStorage, _ *Engine, interval, numIntervals int) error {
 	metrics := []labels.Labels{}
 	metrics = append(metrics, labels.FromStrings("__name__", "a_one"))
 	metrics = append(metrics, labels.FromStrings("__name__", "b_one"))

@@ -1957,7 +1957,7 @@ func (ev *evaluator) matrixIterSlice(
 		// (b) the number of samples is relatively small.
 		// so a linear search will be as fast as a binary search.
 		var drop int
-		for drop = 0; floats[drop].T < mint; drop++ {
+		for drop = 0; floats[drop].T < mint; drop++ { // nolint:revive
 		}
 		ev.currentSamples -= drop
 		copy(floats, floats[drop:])

@@ -11,6 +11,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

+// nolint:revive // Many unused function arguments in this file by design.
 package promql

 import (

@@ -11,6 +11,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

+// nolint:revive // Many legitimately empty blocks in this file.
 package parser

 import (
@@ -293,7 +294,7 @@ func (l *Lexer) accept(valid string) bool {
 // acceptRun consumes a run of runes from the valid set.
 func (l *Lexer) acceptRun(valid string) {
 	for strings.ContainsRune(valid, l.next()) {
-		// consume
+		// Consume.
 	}
 	l.backup()
 }

@@ -332,7 +332,7 @@ func (p *parser) Lex(lval *yySymType) int {
 // It is a no-op since the parsers error routines are triggered
 // by mechanisms that allow more fine-grained control
 // For more information, see https://pkg.go.dev/golang.org/x/tools/cmd/goyacc.
-func (p *parser) Error(e string) {
+func (p *parser) Error(string) {
 }

 // InjectItem allows injecting a single Item at the beginning of the token stream
@@ -481,9 +481,9 @@ func (p *parser) checkAST(node Node) (typ ValueType) {
 		// This is made a function instead of a variable, so it is lazily evaluated on demand.
 		opRange := func() (r PositionRange) {
 			// Remove whitespace at the beginning and end of the range.
-			for r.Start = n.LHS.PositionRange().End; isSpace(rune(p.lex.input[r.Start])); r.Start++ {
+			for r.Start = n.LHS.PositionRange().End; isSpace(rune(p.lex.input[r.Start])); r.Start++ { // nolint:revive
 			}
-			for r.End = n.RHS.PositionRange().Start - 1; isSpace(rune(p.lex.input[r.End])); r.End-- {
+			for r.End = n.RHS.PositionRange().Start - 1; isSpace(rune(p.lex.input[r.End])); r.End-- { // nolint:revive
 			}
 			return
 		}

@@ -866,12 +866,13 @@ func (g *Group) RestoreForState(ts time.Time) {
 			timeSpentPending := downAt.Sub(restoredActiveAt)
 			timeRemainingPending := alertHoldDuration - timeSpentPending

-			if timeRemainingPending <= 0 {
+			switch {
+			case timeRemainingPending <= 0:
 				// It means that alert was firing when prometheus went down.
 				// In the next Eval, the state of this alert will be set back to
 				// firing again if it's still firing in that Eval.
 				// Nothing to be done in this case.
-			} else if timeRemainingPending < g.opts.ForGracePeriod {
+			case timeRemainingPending < g.opts.ForGracePeriod:
 				// (new) restoredActiveAt = (ts + m.opts.ForGracePeriod) - alertHoldDuration
 				// /* new firing time */ /* moving back by hold duration */
 				//
@@ -884,7 +885,7 @@ func (g *Group) RestoreForState(ts time.Time) {
 				// = (ts + m.opts.ForGracePeriod) - ts
 				// = m.opts.ForGracePeriod
 				restoredActiveAt = ts.Add(g.opts.ForGracePeriod).Add(-alertHoldDuration)
-			} else {
+			default:
 				// By shifting ActiveAt to the future (ActiveAt + some_duration),
 				// the total pending time from the original ActiveAt
 				// would be `alertHoldDuration + some_duration`.

@@ -779,13 +779,13 @@ func TestUpdate(t *testing.T) {
 			rgs.Groups[i].Interval = model.Duration(10)
 		}
 	}
-	reloadAndValidate(rgs, t, tmpFile, ruleManager, expected, ogs)
+	reloadAndValidate(rgs, t, tmpFile, ruleManager, ogs)

 	// Update limit and reload.
 	for i := range rgs.Groups {
 		rgs.Groups[i].Limit = 1
 	}
-	reloadAndValidate(rgs, t, tmpFile, ruleManager, expected, ogs)
+	reloadAndValidate(rgs, t, tmpFile, ruleManager, ogs)

 	// Change group rules and reload.
 	for i, g := range rgs.Groups {
@@ -793,7 +793,7 @@ func TestUpdate(t *testing.T) {
 		rgs.Groups[i].Rules[j].Expr.SetString(fmt.Sprintf("%s * 0", r.Expr.Value))
 		}
 	}
-	reloadAndValidate(rgs, t, tmpFile, ruleManager, expected, ogs)
+	reloadAndValidate(rgs, t, tmpFile, ruleManager, ogs)
 }

 // ruleGroupsTest for running tests over rules.
@@ -836,7 +836,7 @@ func formatRules(r *rulefmt.RuleGroups) ruleGroupsTest {
 	}
 }

-func reloadAndValidate(rgs *rulefmt.RuleGroups, t *testing.T, tmpFile *os.File, ruleManager *Manager, expected map[string]labels.Labels, ogs map[string]*Group) {
+func reloadAndValidate(rgs *rulefmt.RuleGroups, t *testing.T, tmpFile *os.File, ruleManager *Manager, ogs map[string]*Group) {
 	bs, err := yaml.Marshal(formatRules(rgs))
 	require.NoError(t, err)
 	tmpFile.Seek(0, 0)

@@ -30,19 +30,19 @@ type unknownRule struct{}

 func (u unknownRule) Name() string { return "" }
 func (u unknownRule) Labels() labels.Labels { return labels.EmptyLabels() }
-func (u unknownRule) Eval(ctx context.Context, time time.Time, queryFunc QueryFunc, url *url.URL, i int) (promql.Vector, error) {
+func (u unknownRule) Eval(context.Context, time.Time, QueryFunc, *url.URL, int) (promql.Vector, error) {
 	return nil, nil
 }
 func (u unknownRule) String() string { return "" }
 func (u unknownRule) Query() parser.Expr { return nil }
-func (u unknownRule) SetLastError(err error) {}
+func (u unknownRule) SetLastError(error) {}
 func (u unknownRule) LastError() error { return nil }
-func (u unknownRule) SetHealth(health RuleHealth) {}
+func (u unknownRule) SetHealth(RuleHealth) {}
 func (u unknownRule) Health() RuleHealth { return "" }
-func (u unknownRule) SetEvaluationDuration(duration time.Duration) {}
+func (u unknownRule) SetEvaluationDuration(time.Duration) {}
 func (u unknownRule) GetEvaluationDuration() time.Duration { return 0 }
-func (u unknownRule) SetEvaluationTimestamp(time time.Time) {}
+func (u unknownRule) SetEvaluationTimestamp(time.Time) {}
 func (u unknownRule) GetEvaluationTimestamp() time.Time { return time.Time{} }

 func TestNewRuleDetailPanics(t *testing.T) {
 	require.PanicsWithValue(t, `unknown rule type "rules.unknownRule"`, func() {

@@ -2405,7 +2405,7 @@ type testScraper struct {
 	scrapeFunc func(context.Context, io.Writer) error
 }

-func (ts *testScraper) offset(interval time.Duration, jitterSeed uint64) time.Duration {
+func (ts *testScraper) offset(time.Duration, uint64) time.Duration {
 	return ts.offsetDur
 }
@@ -2867,7 +2867,7 @@ func TestScrapeAddFast(t *testing.T) {
 	require.NoError(t, slApp.Commit())
 }

-func TestReuseCacheRace(t *testing.T) {
+func TestReuseCacheRace(*testing.T) {
 	var (
 		app = &nopAppendable{}
 		cfg = &config.ScrapeConfig{

@@ -134,7 +134,7 @@ func TestTargetURL(t *testing.T) {
 	require.Equal(t, expectedURL, target.URL())
 }

-func newTestTarget(targetURL string, deadline time.Duration, lbls labels.Labels) *Target {
+func newTestTarget(targetURL string, _ time.Duration, lbls labels.Labels) *Target {
 	lb := labels.NewBuilder(lbls)
 	lb.Set(model.SchemeLabel, "http")
 	lb.Set(model.AddressLabel, strings.TrimPrefix(targetURL, "http://"))

@@ -188,8 +188,8 @@ func BenchmarkBufferedSeriesIterator(b *testing.B) {
 	b.ReportAllocs()
 	b.ResetTimer()

-	for it.Next() != chunkenc.ValNone {
-		// scan everything
+	for it.Next() != chunkenc.ValNone { // nolint:revive
+		// Scan everything.
 	}
 	require.NoError(b, it.Err())
 }

@@ -233,7 +233,7 @@ func (errQuerier) Select(bool, *storage.SelectHints, ...*labels.Matcher) storage
 	return storage.ErrSeriesSet(errSelect)
 }

-func (errQuerier) LabelValues(name string, matchers ...*labels.Matcher) ([]string, storage.Warnings, error) {
+func (errQuerier) LabelValues(string, ...*labels.Matcher) ([]string, storage.Warnings, error) {
 	return nil, nil, errors.New("label values error")
 }

@@ -99,7 +99,7 @@ type MockQueryable struct {
 	MockQuerier Querier
 }

-func (q *MockQueryable) Querier(ctx context.Context, mint, maxt int64) (Querier, error) {
+func (q *MockQueryable) Querier(context.Context, int64, int64) (Querier, error) {
 	return q.MockQuerier, nil
 }
@@ -118,11 +118,11 @@ type MockQuerier struct {
 	SelectMockFunction func(sortSeries bool, hints *SelectHints, matchers ...*labels.Matcher) SeriesSet
 }

-func (q *MockQuerier) LabelValues(name string, matchers ...*labels.Matcher) ([]string, Warnings, error) {
+func (q *MockQuerier) LabelValues(string, ...*labels.Matcher) ([]string, Warnings, error) {
 	return nil, nil, nil
 }

-func (q *MockQuerier) LabelNames(matchers ...*labels.Matcher) ([]string, Warnings, error) {
+func (q *MockQuerier) LabelNames(...*labels.Matcher) ([]string, Warnings, error) {
 	return nil, nil, nil
 }

@@ -82,8 +82,8 @@ func BenchmarkMemoizedSeriesIterator(b *testing.B) {
 	b.ReportAllocs()
 	b.ResetTimer()

-	for it.Next() != chunkenc.ValNone {
-		// scan everything
+	for it.Next() != chunkenc.ValNone { // nolint:revive
+		// Scan everything.
 	}
 	require.NoError(b, it.Err())
 }

@@ -722,12 +722,11 @@ func (c *compactChunkIterator) Next() bool {
 			break
 		}

-		if next.MinTime == prev.MinTime &&
-			next.MaxTime == prev.MaxTime &&
-			bytes.Equal(next.Chunk.Bytes(), prev.Chunk.Bytes()) {
-			// 1:1 duplicates, skip it.
-		} else {
-			// We operate on same series, so labels does not matter here.
+		// Only do something if it is not a perfect duplicate.
+		if next.MinTime != prev.MinTime ||
+			next.MaxTime != prev.MaxTime ||
+			!bytes.Equal(next.Chunk.Bytes(), prev.Chunk.Bytes()) {
+			// We operate on same series, so labels do not matter here.
 			overlapping = append(overlapping, newChunkToSeriesDecoder(labels.EmptyLabels(), next))
 			if next.MaxTime > oMaxTime {
 				oMaxTime = next.MaxTime

@@ -524,7 +524,7 @@ func TestDecodeWriteRequest(t *testing.T) {
 	require.Equal(t, writeRequestFixture, actual)
 }

-func TestNilHistogramProto(t *testing.T) {
+func TestNilHistogramProto(*testing.T) {
 	// This function will panic if it impromperly handles nil
 	// values, causing the test to fail.
 	HistogramProtoToHistogram(prompb.Histogram{})

@@ -362,7 +362,7 @@ func TestReshard(t *testing.T) {
 	c.waitForExpectedData(t)
 }

-func TestReshardRaceWithStop(t *testing.T) {
+func TestReshardRaceWithStop(*testing.T) {
 	c := NewTestWriteClient()
 	var m *QueueManager
 	h := sync.Mutex{}
@@ -864,10 +864,10 @@ func (c *TestBlockingWriteClient) Endpoint() string {
 // For benchmarking the send and not the receive side.
 type NopWriteClient struct{}

 func NewNopWriteClient() *NopWriteClient { return &NopWriteClient{} }
-func (c *NopWriteClient) Store(_ context.Context, req []byte) error { return nil }
+func (c *NopWriteClient) Store(context.Context, []byte) error { return nil }
 func (c *NopWriteClient) Name() string { return "nopwriteclient" }
 func (c *NopWriteClient) Endpoint() string { return "http://test-remote.com/1234" }

 func BenchmarkSampleSend(b *testing.B) {
 	// Send one sample per series, which is the typical remote_write case

@@ -294,7 +294,7 @@ func (m *mockAppendable) AppendExemplar(_ storage.SeriesRef, l labels.Labels, e
 	return 0, nil
 }

-func (m *mockAppendable) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) {
+func (m *mockAppendable) AppendHistogram(_ storage.SeriesRef, l labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) {
 	if t < m.latestHistogram {
 		return 0, storage.ErrOutOfOrderSample
 	}

@@ -732,22 +732,22 @@ func (db *DB) StartTime() (int64, error) {
 }

 // Querier implements the Storage interface.
-func (db *DB) Querier(ctx context.Context, mint, maxt int64) (storage.Querier, error) {
+func (db *DB) Querier(context.Context, int64, int64) (storage.Querier, error) {
 	return nil, ErrUnsupported
 }

 // ChunkQuerier implements the Storage interface.
-func (db *DB) ChunkQuerier(ctx context.Context, mint, maxt int64) (storage.ChunkQuerier, error) {
+func (db *DB) ChunkQuerier(context.Context, int64, int64) (storage.ChunkQuerier, error) {
 	return nil, ErrUnsupported
 }

 // ExemplarQuerier implements the Storage interface.
-func (db *DB) ExemplarQuerier(ctx context.Context) (storage.ExemplarQuerier, error) {
+func (db *DB) ExemplarQuerier(context.Context) (storage.ExemplarQuerier, error) {
 	return nil, ErrUnsupported
 }

 // Appender implements storage.Storage.
-func (db *DB) Appender(_ context.Context) storage.Appender {
+func (db *DB) Appender(context.Context) storage.Appender {
 	return db.appenderPool.Get().(storage.Appender)
 }
@@ -823,7 +823,7 @@ func (a *appender) Append(ref storage.SeriesRef, l labels.Labels, t int64, v flo
 		return 0, storage.ErrOutOfOrderSample
 	}

-	// NOTE: always modify pendingSamples and sampleSeries together
+	// NOTE: always modify pendingSamples and sampleSeries together.
 	a.pendingSamples = append(a.pendingSamples, record.RefSample{
 		Ref: series.ref,
 		T:   t,
@@ -849,8 +849,8 @@ func (a *appender) getOrCreate(l labels.Labels) (series *memSeries, created bool
 	return series, true
 }

-func (a *appender) AppendExemplar(ref storage.SeriesRef, l labels.Labels, e exemplar.Exemplar) (storage.SeriesRef, error) {
-	// series references and chunk references are identical for agent mode.
+func (a *appender) AppendExemplar(ref storage.SeriesRef, _ labels.Labels, e exemplar.Exemplar) (storage.SeriesRef, error) {
+	// Series references and chunk references are identical for agent mode.
 	headRef := chunks.HeadSeriesRef(ref)

 	s := a.series.GetByID(headRef)
@@ -973,7 +973,7 @@ func (a *appender) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t int
 	return storage.SeriesRef(series.ref), nil
 }

-func (a *appender) UpdateMetadata(ref storage.SeriesRef, l labels.Labels, m metadata.Metadata) (storage.SeriesRef, error) {
+func (a *appender) UpdateMetadata(storage.SeriesRef, labels.Labels, metadata.Metadata) (storage.SeriesRef, error) {
 	// TODO: Wire metadata in the Agent's appender.
 	return 0, nil
 }

@@ -107,7 +107,7 @@ func (c *FloatHistogramChunk) Appender() (Appender, error) {
 	// To get an appender, we must know the state it would have if we had
 	// appended all existing data from scratch. We iterate through the end
 	// and populate via the iterator's state.
-	for it.Next() == ValFloatHistogram {
+	for it.Next() == ValFloatHistogram { // nolint:revive
 	}
 	if err := it.Err(); err != nil {
 		return nil, err

@@ -111,7 +111,7 @@ func TestFloatHistogramChunkSameBuckets(t *testing.T) {
 	// 3. Now recycle an iterator that was never used to access anything.
 	itX := c.Iterator(nil)
-	for itX.Next() == ValFloatHistogram {
+	for itX.Next() == ValFloatHistogram { // nolint:revive
 		// Just iterate through without accessing anything.
 	}
 	it3 := c.iterator(itX)

@@ -126,7 +126,7 @@ func (c *HistogramChunk) Appender() (Appender, error) {
 	// To get an appender, we must know the state it would have if we had
 	// appended all existing data from scratch. We iterate through the end
 	// and populate via the iterator's state.
-	for it.Next() == ValHistogram {
+	for it.Next() == ValHistogram { // nolint:revive
 	}
 	if err := it.Err(); err != nil {
 		return nil, err

@@ -116,7 +116,7 @@ func TestHistogramChunkSameBuckets(t *testing.T) {
 	// 3. Now recycle an iterator that was never used to access anything.
 	itX := c.Iterator(nil)
-	for itX.Next() == ValHistogram {
+	for itX.Next() == ValHistogram { // nolint:revive
 		// Just iterate through without accessing anything.
 	}
 	it3 := c.iterator(itX)

@@ -99,7 +99,7 @@ func (c *XORChunk) Appender() (Appender, error) {
 	// To get an appender we must know the state it would have if we had
 	// appended all existing data from scratch.
 	// We iterate through the end and populate via the iterator's state.
-	for it.Next() != ValNone {
+	for it.Next() != ValNone { // nolint:revive
 	}
 	if err := it.Err(); err != nil {
 		return nil, err
@@ -152,11 +152,11 @@ type xorAppender struct {
 	trailing uint8
 }

-func (a *xorAppender) AppendHistogram(t int64, h *histogram.Histogram) {
+func (a *xorAppender) AppendHistogram(int64, *histogram.Histogram) {
 	panic("appended a histogram to an xor chunk")
 }

-func (a *xorAppender) AppendFloatHistogram(t int64, h *histogram.FloatHistogram) {
+func (a *xorAppender) AppendFloatHistogram(int64, *histogram.FloatHistogram) {
 	panic("appended a float histogram to an xor chunk")
 }

@@ -503,10 +503,10 @@ func createChunkDiskMapper(t *testing.T, dir string) *ChunkDiskMapper {
 func randomChunk(t *testing.T) chunkenc.Chunk {
 	chunk := chunkenc.NewXORChunk()
-	len := rand.Int() % 120
+	length := rand.Int() % 120
 	app, err := chunk.Appender()
 	require.NoError(t, err)
-	for i := 0; i < len; i++ {
+	for i := 0; i < length; i++ {
 		app.Append(rand.Int63(), rand.Float64())
 	}
 	return chunk

@@ -467,8 +467,8 @@ func (erringBReader) Size() int64 { return 0 }

 type nopChunkWriter struct{}

-func (nopChunkWriter) WriteChunks(chunks ...chunks.Meta) error { return nil }
+func (nopChunkWriter) WriteChunks(...chunks.Meta) error { return nil }
 func (nopChunkWriter) Close() error { return nil }

 func samplesForRange(minTime, maxTime int64, maxSamplesPerChunk int) (ret [][]sample) {
 	var curr []sample

@@ -1426,11 +1426,11 @@ type mockCompactorFailing struct {
 	max int
 }

-func (*mockCompactorFailing) Plan(dir string) ([]string, error) {
+func (*mockCompactorFailing) Plan(string) ([]string, error) {
 	return nil, nil
 }

-func (c *mockCompactorFailing) Write(dest string, b BlockReader, mint, maxt int64, parent *BlockMeta) (ulid.ULID, error) {
+func (c *mockCompactorFailing) Write(dest string, _ BlockReader, _, _ int64, _ *BlockMeta) (ulid.ULID, error) {
 	if len(c.blocks) >= c.max {
 		return ulid.ULID{}, fmt.Errorf("the compactor already did the maximum allowed blocks so it is time to fail")
 	}
@@ -1458,7 +1458,7 @@ func (*mockCompactorFailing) Compact(string, []string, []*Block) (ulid.ULID, err
 	return ulid.ULID{}, nil
 }

-func (*mockCompactorFailing) CompactOOO(dest string, oooHead *OOOCompactionHead) (result []ulid.ULID, err error) {
+func (*mockCompactorFailing) CompactOOO(string, *OOOCompactionHead) (result []ulid.ULID, err error) {
 	return nil, fmt.Errorf("mock compaction failing CompactOOO")
 }

@@ -115,17 +115,17 @@ func NewExemplarMetrics(reg prometheus.Registerer) *ExemplarMetrics {
 // 1GB of extra memory, accounting for the fact that this is heap allocated space.
 // If len <= 0, then the exemplar storage is essentially a noop storage but can later be
 // resized to store exemplars.
-func NewCircularExemplarStorage(len int64, m *ExemplarMetrics) (ExemplarStorage, error) {
-	if len < 0 {
-		len = 0
+func NewCircularExemplarStorage(length int64, m *ExemplarMetrics) (ExemplarStorage, error) {
+	if length < 0 {
+		length = 0
 	}
 	c := &CircularExemplarStorage{
-		exemplars: make([]*circularBufferEntry, len),
-		index:     make(map[string]*indexEntry, len/estimatedExemplarsPerSeries),
+		exemplars: make([]*circularBufferEntry, length),
+		index:     make(map[string]*indexEntry, length/estimatedExemplarsPerSeries),
 		metrics:   m,
 	}
-	c.metrics.maxExemplars.Set(float64(len))
+	c.metrics.maxExemplars.Set(float64(length))

 	return c, nil
 }

@@ -24,4 +24,4 @@ import (
 //
 // The blank import above is actually what invokes the test of this package. If
 // the import succeeds (the code compiles), the test passed.
-func Test(t *testing.T) {}
+func Test(*testing.T) {}

@@ -11,6 +11,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

+// nolint:revive // Many legitimately empty blocks in this file.
 package tsdb

 import (
@@ -103,7 +104,7 @@ func BenchmarkHeadAppender_Append_Commit_ExistingSeries(b *testing.B) {
 		b.Cleanup(func() { require.NoError(b, h.Close()) })

 		ts := int64(1000)
-		append := func() error {
+		appendSamples := func() error {
 			var err error
 			app := h.Appender(context.Background())
 			for _, s := range series[:seriesCount] {
@@ -120,13 +121,13 @@ func BenchmarkHeadAppender_Append_Commit_ExistingSeries(b *testing.B) {
 		}

 		// Init series, that's not what we're benchmarking here.
-		require.NoError(b, append())
+		require.NoError(b, appendSamples())

 		b.ReportAllocs()
 		b.ResetTimer()

 		for i := 0; i < b.N; i++ {
-			require.NoError(b, append())
+			require.NoError(b, appendSamples())
 		}
 	})
 }

@@ -11,6 +11,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

+// nolint:revive // Many legitimately empty blocks in this file.
 package tsdb

 import (

@@ -11,6 +11,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

+// nolint:revive // Many unused function arguments in this file by design.
 package tsdb

 import (

@@ -1085,7 +1085,7 @@ func newNopChunkReader() ChunkReader {
 	}
 }

-func (cr nopChunkReader) Chunk(meta chunks.Meta) (chunkenc.Chunk, error) {
+func (cr nopChunkReader) Chunk(chunks.Meta) (chunkenc.Chunk, error) {
 	return cr.emptyChunk, nil
 }

@@ -250,7 +250,7 @@ func BenchmarkQuerierSelect(b *testing.B) {
 		b.ResetTimer()
 		for i := 0; i < b.N; i++ {
 			ss := q.Select(sorted, nil, matcher)
-			for ss.Next() {
+			for ss.Next() { // nolint:revive
 			}
 			require.NoError(b, ss.Err())
 		}

@@ -11,6 +11,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

+// nolint:revive // Many unused function arguments in this file by design.
 package tsdb

 import (

@@ -11,6 +11,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

+// nolint:revive // Many unused function arguments in this file by design.
 package tsdb

 import (

@@ -533,7 +533,7 @@ func TestReaderData(t *testing.T) {
 			require.NoError(t, err)

 			reader := fn(sr)
-			for reader.Next() {
+			for reader.Next() { // nolint:revive
 			}
 			require.NoError(t, reader.Err())

@@ -164,7 +164,7 @@ func TestWALRepair_ReadingError(t *testing.T) {
 	sr := NewSegmentBufReader(s)
 	require.NoError(t, err)
 	r := NewReader(sr)
-	for r.Next() {
+	for r.Next() { // nolint:revive
 	}

 	// Close the segment so we don't break things on Windows.

@@ -22,7 +22,7 @@ import (

 type counter int

-func (c *counter) Log(keyvals ...interface{}) error {
+func (c *counter) Log(...interface{}) error {
 	(*c)++
 	return nil
 }

@@ -37,6 +37,6 @@ func (c *MockContext) Err() error {
 }

 // Value ignores the Value and always returns nil
-func (c *MockContext) Value(key interface{}) interface{} {
+func (c *MockContext) Value(interface{}) interface{} {
 	return nil
 }

@@ -22,7 +22,7 @@ type roundTrip struct {
 	theError error
 }

-func (rt *roundTrip) RoundTrip(r *http.Request) (*http.Response, error) {
+func (rt *roundTrip) RoundTrip(*http.Request) (*http.Response, error) {
 	return rt.theResponse, rt.theError
 }

@@ -116,7 +116,7 @@ func (tc *ZookeeperTreeCache) Stop() {
 	tc.stop <- struct{}{}
 	go func() {
 		// Drain tc.head.events so that go routines can make progress and exit.
-		for range tc.head.events {
+		for range tc.head.events { // nolint:revive
 		}
 	}()
 	go func() {

@@ -117,7 +117,7 @@ type RulesRetriever interface {

 type StatsRenderer func(context.Context, *stats.Statistics, string) stats.QueryStats

-func defaultStatsRenderer(ctx context.Context, s *stats.Statistics, param string) stats.QueryStats {
+func defaultStatsRenderer(_ context.Context, s *stats.Statistics, param string) stats.QueryStats {
 	if param != "" {
 		return stats.NewQueryStats(s)
 	}
@@ -392,7 +392,7 @@ func invalidParamError(err error, parameter string) apiFuncResult {
 	}, nil, nil}
 }

-func (api *API) options(r *http.Request) apiFuncResult {
+func (api *API) options(*http.Request) apiFuncResult {
 	return apiFuncResult{nil, nil, nil, nil}
 }
@@ -1565,7 +1565,7 @@ func (api *API) snapshot(r *http.Request) apiFuncResult {
 	}{name}, nil, nil, nil}
 }

-func (api *API) cleanTombstones(r *http.Request) apiFuncResult {
+func (api *API) cleanTombstones(*http.Request) apiFuncResult {
 	if !api.enableAdmin {
 		return apiFuncResult{nil, &apiError{errorUnavailable, errors.New("admin APIs disabled")}, nil, nil}
 	}
@@ -1764,7 +1764,7 @@ func marshalSeriesJSON(ptr unsafe.Pointer, stream *jsoniter.Stream) {
 	stream.WriteObjectEnd()
 }

-func marshalSeriesJSONIsEmpty(ptr unsafe.Pointer) bool {
+func marshalSeriesJSONIsEmpty(unsafe.Pointer) bool {
 	return false
 }
@@ -1817,7 +1817,7 @@ func marshalSampleJSON(ptr unsafe.Pointer, stream *jsoniter.Stream) {
 	stream.WriteObjectEnd()
 }

-func marshalSampleJSONIsEmpty(ptr unsafe.Pointer) bool {
+func marshalSampleJSONIsEmpty(unsafe.Pointer) bool {
 	return false
 }
@@ -1841,7 +1841,7 @@ func marshalHPointJSON(ptr unsafe.Pointer, stream *jsoniter.Stream) {
 	stream.WriteArrayEnd()
 }

-func marshalPointJSONIsEmpty(ptr unsafe.Pointer) bool {
+func marshalPointJSONIsEmpty(unsafe.Pointer) bool {
 	return false
 }
@@ -1878,6 +1878,6 @@ func marshalExemplarJSON(ptr unsafe.Pointer, stream *jsoniter.Stream) {
 	stream.WriteObjectEnd()
 }

-func marshalExemplarJSONEmpty(ptr unsafe.Pointer) bool {
+func marshalExemplarJSONEmpty(unsafe.Pointer) bool {
 	return false
 }

@@ -2560,9 +2560,9 @@ type fakeDB struct {
 	err error
 }

 func (f *fakeDB) CleanTombstones() error { return f.err }
-func (f *fakeDB) Delete(mint, maxt int64, ms ...*labels.Matcher) error { return f.err }
-func (f *fakeDB) Snapshot(dir string, withHead bool) error { return f.err }
+func (f *fakeDB) Delete(int64, int64, ...*labels.Matcher) error { return f.err }
+func (f *fakeDB) Snapshot(string, bool) error { return f.err }
 func (f *fakeDB) Stats(statsByLabelName string) (_ *tsdb.Stats, retErr error) {
 	dbDir, err := os.MkdirTemp("", "tsdb-api-ready")
 	if err != nil {

@@ -11,6 +11,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

+// nolint:revive // Many unused function arguments in this file by design.
 package v1

 import (

@@ -755,14 +755,14 @@ func toFloat64(f *io_prometheus_client.MetricFamily) float64 {
 	return math.NaN()
 }

-func (h *Handler) version(w http.ResponseWriter, r *http.Request) {
+func (h *Handler) version(w http.ResponseWriter, _ *http.Request) {
 	dec := json.NewEncoder(w)
 	if err := dec.Encode(h.versionInfo); err != nil {
 		http.Error(w, fmt.Sprintf("error encoding JSON: %s", err), http.StatusInternalServerError)
 	}
 }

-func (h *Handler) quit(w http.ResponseWriter, r *http.Request) {
+func (h *Handler) quit(w http.ResponseWriter, _ *http.Request) {
 	var closed bool
 	h.quitOnce.Do(func() {
 		closed = true
@@ -774,7 +774,7 @@ func (h *Handler) quit(w http.ResponseWriter, r *http.Request) {
 	}
 }

-func (h *Handler) reload(w http.ResponseWriter, r *http.Request) {
+func (h *Handler) reload(w http.ResponseWriter, _ *http.Request) {
 	rc := make(chan error)
 	h.reloadCh <- rc
 	if err := <-rc; err != nil {