golangci-lint: enable whitespace linter (#13905)

Signed-off-by: Matthieu MOREL <matthieu.morel35@gmail.com>
This commit is contained in:
Matthieu MOREL 2024-04-11 10:27:54 +02:00 committed by GitHub
parent 594b317ecc
commit 6f595c6762
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
27 changed files with 1 addition and 33 deletions

View file

@ -27,6 +27,7 @@ linters:
- unconvert
- unused
- usestdlibvars
- whitespace
issues:
max-same-issues: 0

View file

@ -78,7 +78,6 @@ func TestBackfillRuleIntegration(t *testing.T) {
// Execute the test more than once to simulate running the rule importer twice with the same data.
// We expect duplicate blocks with the same series are created when run more than once.
for i := 0; i < tt.runcount; i++ {
ruleImporter, err := newTestRuleImporter(ctx, start, tmpDir, tt.samples, tt.maxBlockDuration)
require.NoError(t, err)
path1 := filepath.Join(tmpDir, "test.file")

View file

@ -228,7 +228,6 @@ func targetsForApp(app *Application) []model.LabelSet {
}
targets = append(targets, target)
}
return targets
}

View file

@ -122,7 +122,6 @@ func (d *robotDiscovery) refresh(context.Context) ([]*targetgroup.Group, error)
labels[hetznerLabelPublicIPv6Network] = model.LabelValue(fmt.Sprintf("%s/%s", subnet.IP, subnet.Mask))
break
}
}
targets[i] = labels
}

View file

@ -453,7 +453,6 @@ func targetsForApp(app *app) []model.LabelSet {
// Gather info about the app's 'tasks'. Each instance (container) is considered a task
// and can be reachable at one or more host:port endpoints.
for _, t := range app.Tasks {
// There are no labels to gather if only Ports is defined. (eg. with host networking)
// Ports can only be gathered from the Task (not from the app) and are guaranteed
// to be the same across all tasks. If we haven't gathered any ports by now,
@ -464,7 +463,6 @@ func targetsForApp(app *app) []model.LabelSet {
// Iterate over the ports we gathered using one of the methods above.
for i, port := range ports {
// A zero port here means that either the portMapping has a zero port defined,
// or there is a portDefinition with requirePorts set to false. This means the port
// is auto-generated by Mesos and needs to be looked up in the task.
@ -516,7 +514,6 @@ func extractPortMapping(portMappings []portMapping, containerNet bool) ([]uint32
labels := make([]map[string]string, len(portMappings))
for i := 0; i < len(portMappings); i++ {
labels[i] = portMappings[i].Labels
if containerNet {

View file

@ -80,7 +80,6 @@ func (d *Discovery) refreshNodes(ctx context.Context) ([]*targetgroup.Group, err
labels[model.AddressLabel] = model.LabelValue(addr)
tg.Targets = append(tg.Targets, labels)
}
return []*targetgroup.Group{tg}, nil
}

View file

@ -190,7 +190,6 @@ func (d *instanceDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group,
targets = append(targets, labels)
}
}
return []*targetgroup.Group{{Source: "scaleway", Targets: targets}}, nil

View file

@ -291,7 +291,6 @@ func parseServersetMember(data []byte, path string) (model.LabelSet, error) {
endpoint.Host)
labels[serversetEndpointLabelPrefix+"_port_"+cleanName] = model.LabelValue(
fmt.Sprintf("%d", endpoint.Port))
}
labels[serversetStatusLabel] = model.LabelValue(member.Status)

View file

@ -342,7 +342,6 @@ func funcHoltWinters(vals []parser.Value, args parser.Expressions, enh *EvalNode
// Run the smoothing operation.
var x, y float64
for i := 1; i < l; i++ {
// Scale the raw value against the smoothing factor.
x = sf * samples.Floats[i].F
@ -1240,7 +1239,6 @@ func funcHistogramQuantile(vals []parser.Value, args parser.Expressions, enh *Ev
enh.signatureToMetricWithBuckets[string(enh.lblBuf)] = mb
}
mb.buckets = append(mb.buckets, bucket{upperBound, sample.F})
}
// Now deal with the histograms.

View file

@ -258,7 +258,6 @@ func ParseSeriesDesc(input string) (labels labels.Labels, values []SequenceValue
labels = result.labels
values = result.values
}
if len(p.parseErrors) != 0 {

View file

@ -567,7 +567,6 @@ func (ev *evalCmd) compareResult(result parser.Value) error {
return fmt.Errorf("expected histogram value at index %v (t=%v) for %s to be %v, but got %v (result has %s)", i, actual.T, ev.metrics[hash], expected.H, actual.H, formatSeriesResult(s))
}
}
}
for hash := range ev.expected {

View file

@ -336,7 +336,6 @@ func TestForStateAddSamples(t *testing.T) {
for _, aa := range rule.ActiveAlerts() {
require.Zero(t, aa.Labels.Get(model.MetricNameLabel), "%s label set on active alert: %s", model.MetricNameLabel, aa.Labels)
}
}
}
@ -1824,7 +1823,6 @@ func TestDependencyMapUpdatesOnGroupUpdate(t *testing.T) {
} else {
require.Equal(t, orig[h], depMap)
}
}
// Groups will be recreated when updated.
@ -1962,7 +1960,6 @@ func TestAsyncRuleEvaluation(t *testing.T) {
require.Less(t, time.Since(start).Seconds(), (time.Duration(ruleCount) * artificialDelay).Seconds())
// Each rule produces one vector.
require.EqualValues(t, ruleCount, testutil.ToFloat64(group.metrics.GroupSamples))
}
})

View file

@ -185,7 +185,6 @@ func (m *Manager) reload() {
sp.Sync(groups)
wg.Done()
}(m.scrapePools[setName], groups)
}
m.mtxScrape.Unlock()
wg.Wait()

View file

@ -583,7 +583,6 @@ func TestManagerTargetsUpdates(t *testing.T) {
tgSent := make(map[string][]*targetgroup.Group)
for x := 0; x < 10; x++ {
tgSent[strconv.Itoa(x)] = []*targetgroup.Group{
{
Source: strconv.Itoa(x),

View file

@ -592,7 +592,6 @@ func TestMaxSchemaAppender(t *testing.T) {
_, err = app.AppendHistogram(0, lbls, ts, nil, fh)
require.Equal(t, c.expectSchema, fh.Schema)
require.NoError(t, err)
} else {
h := c.h.Copy()
_, err = app.AppendHistogram(0, lbls, ts, h, nil)

View file

@ -377,7 +377,6 @@ func TestMergeChunkQuerierWithNoVerticalChunkSeriesMerger(t *testing.T) {
actChks, actErr := ExpandChunks(actualSeries.Iterator(nil))
require.Equal(t, expErr, actErr)
require.Equal(t, expChks, actChks)
}
require.NoError(t, merged.Err())
require.False(t, tc.expected.Next(), "Expected Next() to be false")

View file

@ -845,7 +845,6 @@ func (c *TestWriteClient) Store(_ context.Context, req []byte, _ int) error {
} else {
c.receivedHistograms[seriesName] = append(c.receivedHistograms[seriesName], histogram)
}
}
}
if c.withWaitGroup {

View file

@ -135,7 +135,6 @@ func (h *writeHandler) write(ctx context.Context, req *prompb.WriteRequest) (err
}
return err
}
}
for _, ep := range ts.Exemplars {

View file

@ -459,7 +459,6 @@ func TestLabelNamesWithMatchers(t *testing.T) {
"unique", fmt.Sprintf("value%d", i),
), []chunks.Sample{sample{100, 0, nil, nil}}))
}
}
blockDir := createBlock(t, tmpdir, seriesEntries)

View file

@ -73,7 +73,6 @@ func readHistogramChunkLayoutSpans(b *bstreamReader) ([]histogram.Span, error) {
return nil, err
}
for i := 0; i < int(num); i++ {
length, err := readVarbitUint(b)
if err != nil {
return nil, err

View file

@ -1766,7 +1766,6 @@ func OverlappingBlocks(bm []BlockMeta) Overlaps {
// Fetch the critical overlapped time range foreach overlap groups.
overlapGroups := Overlaps{}
for _, overlap := range overlaps {
minRange := TimeRange{Min: 0, Max: math.MaxInt64}
for _, b := range overlap {
if minRange.Max > b.MaxTime {

View file

@ -1275,7 +1275,6 @@ func (s *memSeries) appendPreprocessor(t int64, e chunkenc.Encoding, o chunkOpts
// encoding. So we cut a new chunk with the expected encoding.
c = s.cutNewHeadChunk(t, e, o.chunkRange)
chunkCreated = true
}
numSamples := c.chunk.NumSamples()

View file

@ -1215,7 +1215,6 @@ func TestHeadDeleteSimple(t *testing.T) {
for _, smpl := range smplsAll {
_, err := app.Append(0, lblsDefault, smpl.t, smpl.f)
require.NoError(t, err)
}
require.NoError(t, app.Commit())
@ -1229,7 +1228,6 @@ func TestHeadDeleteSimple(t *testing.T) {
for _, smpl := range c.addSamples {
_, err := app.Append(0, lblsDefault, smpl.t, smpl.f)
require.NoError(t, err)
}
require.NoError(t, app.Commit())
@ -3851,7 +3849,6 @@ func TestChunkSnapshot(t *testing.T) {
}
{ // Additional data to only include in WAL and m-mapped chunks and not snapshot. This mimics having an old snapshot on disk.
// Add more samples.
app := head.Appender(context.Background())
for i := 1; i <= numSeries; i++ {

View file

@ -1323,7 +1323,6 @@ func DeleteChunkSnapshots(dir string, maxIndex, maxOffset int) error {
errs.Add(err)
}
}
}
return errs.Err()
}

View file

@ -1528,7 +1528,6 @@ func (r *Reader) LabelValues(ctx context.Context, name string, matchers ...*labe
values = append(values, k)
}
return values, nil
}
e, ok := r.postings[name]
if !ok {

View file

@ -844,7 +844,6 @@ func (p *populateWithDelChunkSeriesIterator) Next() bool {
return true
}
}
}
return false
}

View file

@ -213,7 +213,6 @@ func (w *Watcher) setMetrics() {
w.samplesSentPreTailing = w.metrics.samplesSentPreTailing.WithLabelValues(w.name)
w.currentSegmentMetric = w.metrics.currentSegment.WithLabelValues(w.name)
w.notificationsSkipped = w.metrics.notificationsSkipped.WithLabelValues(w.name)
}
}