From 48c439d26da86c78ae0d6949554d61b8b32f580c Mon Sep 17 00:00:00 2001
From: Krasi Georgiev
Date: Wed, 2 Jan 2019 19:48:42 +0300
Subject: [PATCH] fix static check errors (#475)

Fix the tests for `check_license` and `staticcheck`.
The static check also found some actual bugs.

Signed-off-by: Krasi Georgiev
---
 .travis.yml           |  6 +++---
 Makefile              |  8 ++++++--
 checkpoint_test.go    |  4 ++--
 cmd/tsdb/main.go      | 14 ++++++++------
 db_test.go            |  1 +
 head.go               |  2 ++
 head_test.go          |  4 ++--
 index/index_test.go   |  2 ++
 labels/labels_test.go |  2 +-
 querier.go            |  4 +---
 repair_test.go        |  4 ++--
 staticcheck.conf      |  2 ++
 wal.go                |  3 +++
 wal_test.go           |  2 +-
 14 files changed, 36 insertions(+), 22 deletions(-)
 create mode 100644 staticcheck.conf

diff --git a/.travis.yml b/.travis.yml
index c03d14f53..c5012a34a 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -15,10 +15,10 @@ go_import_path: github.com/prometheus/tsdb
 
 before_install:
  - if [[ "$TRAVIS_OS_NAME" == "windows" ]]; then choco install make; fi
- 
+
 install:
- - go get -v -t ./...
+ - make deps
 
 script:
   # `staticcheck` target is omitted due to linting errors
-  - if [[ "$TRAVIS_OS_NAME" == "windows" ]]; then make test; else make check_license style unused test; fi
+  - if [[ "$TRAVIS_OS_NAME" == "windows" ]]; then make test; else make; fi
diff --git a/Makefile b/Makefile
index eff204cd2..3458e7fa4 100644
--- a/Makefile
+++ b/Makefile
@@ -18,11 +18,15 @@ TSDB_BENCHMARK_NUM_METRICS ?= 1000
 TSDB_BENCHMARK_DATASET ?= "$(TSDB_PROJECT_DIR)/testdata/20kseries.json"
 TSDB_BENCHMARK_OUTPUT_DIR ?= "$(TSDB_CLI_DIR)/benchout"
 
-STATICCHECK_IGNORE =
 
 include Makefile.common
 
+.PHONY: deps
+deps:
+	@echo ">> getting dependencies"
+	GO111MODULE=$(GO111MODULE) $(GO) get $(GOOPTS) -t ./...
+
 build:
-	@$(GO) build -o $(TSDB_BIN) $(TSDB_CLI_DIR)
+	GO111MODULE=$(GO111MODULE) $(GO) build -o $(TSDB_BIN) $(TSDB_CLI_DIR)
 
 bench: build
 	@echo ">> running benchmark, writing result to $(TSDB_BENCHMARK_OUTPUT_DIR)"
diff --git a/checkpoint_test.go b/checkpoint_test.go
index 76c486a7d..8538cf0c6 100644
--- a/checkpoint_test.go
+++ b/checkpoint_test.go
@@ -31,11 +31,11 @@ func TestLastCheckpoint(t *testing.T) {
 	testutil.Ok(t, err)
 	defer os.RemoveAll(dir)
 
-	s, k, err := LastCheckpoint(dir)
+	_, _, err = LastCheckpoint(dir)
 	testutil.Equals(t, ErrNotFound, err)
 
 	testutil.Ok(t, os.MkdirAll(filepath.Join(dir, "checkpoint.0000"), 0777))
-	s, k, err = LastCheckpoint(dir)
+	s, k, err := LastCheckpoint(dir)
 	testutil.Ok(t, err)
 	testutil.Equals(t, filepath.Join(dir, "checkpoint.0000"), s)
 	testutil.Equals(t, 0, k)
diff --git a/cmd/tsdb/main.go b/cmd/tsdb/main.go
index c5fac9cd8..01d85d590 100644
--- a/cmd/tsdb/main.go
+++ b/cmd/tsdb/main.go
@@ -51,7 +51,7 @@ func main() {
 		listPath       = listCmd.Arg("db path", "database path (default is "+filepath.Join("benchout", "storage")+")").Default(filepath.Join("benchout", "storage")).String()
 		analyzeCmd     = cli.Command("analyze", "analyze churn, label pair cardinality.")
 		analyzePath    = analyzeCmd.Arg("db path", "database path (default is "+filepath.Join("benchout", "storage")+")").Default(filepath.Join("benchout", "storage")).String()
-		analyzeBlockId = analyzeCmd.Arg("block id", "block to analyze (default is the last block)").String()
+		analyzeBlockID = analyzeCmd.Arg("block id", "block to analyze (default is the last block)").String()
 		analyzeLimit   = analyzeCmd.Flag("limit", "how many items to show in each list").Default("20").Int()
 	)
 
@@ -76,9 +76,9 @@ func main() {
 		}
 		blocks := db.Blocks()
 		var block *tsdb.Block
-		if *analyzeBlockId != "" {
+		if *analyzeBlockID != "" {
 			for _, b := range blocks {
-				if b.Meta().ULID.String() == *analyzeBlockId {
+				if b.Meta().ULID.String() == *analyzeBlockID {
 					block = b
 					break
 				}
@@ -455,15 +455,17 @@ func analyzeBlock(b *tsdb.Block, limit int) {
 	lbls := labels.Labels{}
 	chks := []chunks.Meta{}
 	for p.Next() {
-		err = ir.Series(p.At(), &lbls, &chks)
+		if err = ir.Series(p.At(), &lbls, &chks); err != nil {
+			exitWithError(err)
+		}
 		// Amount of the block time range not covered by this series.
 		uncovered := uint64(meta.MaxTime-meta.MinTime) - uint64(chks[len(chks)-1].MaxTime-chks[0].MinTime)
 		for _, lbl := range lbls {
 			key := lbl.Name + "=" + lbl.Value
 			labelsUncovered[lbl.Name] += uncovered
 			labelpairsUncovered[key] += uncovered
-			labelpairsCount[key] += 1
-			entries += 1
+			labelpairsCount[key]++
+			entries++
 		}
 	}
 	if p.Err() != nil {
diff --git a/db_test.go b/db_test.go
index e3204ac37..92d487eec 100644
--- a/db_test.go
+++ b/db_test.go
@@ -1522,6 +1522,7 @@ func TestBlockRanges(t *testing.T) {
 	_, err = app.Add(lbl, secondBlockMaxt+3, rand.Float64())
 	testutil.Ok(t, err)
 	_, err = app.Add(lbl, secondBlockMaxt+4, rand.Float64())
+	testutil.Ok(t, err)
 	testutil.Ok(t, app.Commit())
 	testutil.Ok(t, db.Close())
diff --git a/head.go b/head.go
index eba1f2dc4..917e2b071 100644
--- a/head.go
+++ b/head.go
@@ -685,6 +685,7 @@ func (h *Head) getAppendBuffer() []RefSample {
 }
 
 func (h *Head) putAppendBuffer(b []RefSample) {
+	//lint:ignore SA6002 safe to ignore and actually fixing it has some performance penalty.
 	h.appendPool.Put(b[:0])
 }
 
@@ -697,6 +698,7 @@ func (h *Head) getBytesBuffer() []byte {
 }
 
 func (h *Head) putBytesBuffer(b []byte) {
+	//lint:ignore SA6002 safe to ignore and actually fixing it has some performance penalty.
 	h.bytesPool.Put(b[:0])
 }
diff --git a/head_test.go b/head_test.go
index ca2c49365..8781f677a 100644
--- a/head_test.go
+++ b/head_test.go
@@ -390,8 +390,8 @@ Outer:
 func TestDeleteUntilCurMax(t *testing.T) {
 	numSamples := int64(10)
 	hb, err := NewHead(nil, nil, nil, 1000000)
-	defer hb.Close()
 	testutil.Ok(t, err)
+	defer hb.Close()
 	app := hb.Appender()
 	smpls := make([]float64, numSamples)
 	for i := int64(0); i < numSamples; i++ {
@@ -677,7 +677,7 @@ func TestMemSeries_append(t *testing.T) {
 
 	ok, chunkCreated = s.append(1000, 3)
 	testutil.Assert(t, ok, "append failed")
-	testutil.Assert(t, ok, "expected new chunk on boundary")
+	testutil.Assert(t, chunkCreated, "expected new chunk on boundary")
 
 	ok, chunkCreated = s.append(1001, 4)
 	testutil.Assert(t, ok, "append failed")
diff --git a/index/index_test.go b/index/index_test.go
index f7a815622..f915bca28 100644
--- a/index/index_test.go
+++ b/index/index_test.go
@@ -339,6 +339,7 @@ func TestPersistence_index_e2e(t *testing.T) {
 		testutil.Ok(t, err)
 
 		expp, err := mi.Postings(p.Name, p.Value)
+		testutil.Ok(t, err)
 
 		var lset, explset labels.Labels
 		var chks, expchks []chunks.Meta
@@ -352,6 +353,7 @@ func TestPersistence_index_e2e(t *testing.T) {
 			testutil.Ok(t, err)
 
 			err = mi.Series(expp.At(), &explset, &expchks)
+			testutil.Ok(t, err)
 			testutil.Equals(t, explset, lset)
 			testutil.Equals(t, expchks, chks)
 		}
diff --git a/labels/labels_test.go b/labels/labels_test.go
index 5ad573ce9..c49f66edf 100644
--- a/labels/labels_test.go
+++ b/labels/labels_test.go
@@ -150,7 +150,7 @@ func BenchmarkMapFromLabels(b *testing.B) {
 	b.ReportAllocs()
 
 	for i := 0; i < b.N; i++ {
-		m = ls.Map()
+		_ = ls.Map()
 	}
 }
diff --git a/querier.go b/querier.go
index c3dbc8f3c..4a5a40636 100644
--- a/querier.go
+++ b/querier.go
@@ -581,11 +581,9 @@ func (s *populatedChunkSeries) Next() bool {
 			// This means that the chunk has be garbage collected. Remove it from the list.
 			if s.err == ErrNotFound {
 				s.err = nil
-				// Delete in-place.
-				chks = append(chks[:j], chks[j+1:]...)
+				s.chks = append(chks[:j], chks[j+1:]...)
 			}
-
 			return false
 		}
 	}
diff --git a/repair_test.go b/repair_test.go
index b3bf0acba..5fb780a5b 100644
--- a/repair_test.go
+++ b/repair_test.go
@@ -65,7 +65,7 @@ func TestRepairBadIndexVersion(t *testing.T) {
 
 	// Check the current db.
 	// In its current state, lookups should fail with the fixed code.
-	meta, err := readMetaFile(dbDir)
+	_, err := readMetaFile(dbDir)
 	testutil.NotOk(t, err)
 
 	// Touch chunks dir in block.
@@ -116,7 +116,7 @@ func TestRepairBadIndexVersion(t *testing.T) {
 		{{"a", "2"}, {"b", "1"}},
 	}, res)
 
-	meta, err = readMetaFile(tmpDbDir)
+	meta, err := readMetaFile(tmpDbDir)
 	testutil.Ok(t, err)
 	testutil.Assert(t, meta.Version == 1, "unexpected meta version %d", meta.Version)
 }
diff --git a/staticcheck.conf b/staticcheck.conf
new file mode 100644
index 000000000..3266a2e29
--- /dev/null
+++ b/staticcheck.conf
@@ -0,0 +1,2 @@
+# Enable only "legacy" staticcheck verifications.
+checks = [ "SA*" ]
diff --git a/wal.go b/wal.go
index f65c7a3a4..60e1c5807 100644
--- a/wal.go
+++ b/wal.go
@@ -888,16 +888,19 @@ func (r *walReader) Read(
 			if seriesf != nil {
 				seriesf(v)
 			}
+			//lint:ignore SA6002 safe to ignore and actually fixing it has some performance penalty.
 			seriesPool.Put(v[:0])
 		case []RefSample:
 			if samplesf != nil {
 				samplesf(v)
 			}
+			//lint:ignore SA6002 safe to ignore and actually fixing it has some performance penalty.
 			samplePool.Put(v[:0])
 		case []Stone:
 			if deletesf != nil {
 				deletesf(v)
 			}
+			//lint:ignore SA6002 safe to ignore and actually fixing it has some performance penalty.
 			deletePool.Put(v[:0])
 		default:
 			level.Error(r.logger).Log("msg", "unexpected data type")
diff --git a/wal_test.go b/wal_test.go
index 5e880f5f1..fcda65b41 100644
--- a/wal_test.go
+++ b/wal_test.go
@@ -290,7 +290,7 @@ func TestWALRestoreCorrupted_invalidSegment(t *testing.T) {
 
 	testutil.Ok(t, wal.Close())
 
-	wal, err = OpenSegmentWAL(dir, log.NewLogfmtLogger(os.Stderr), 0, nil)
+	_, err = OpenSegmentWAL(dir, log.NewLogfmtLogger(os.Stderr), 0, nil)
 	testutil.Ok(t, err)
 
 	fns, err := fileutil.ReadDir(dir)
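
A note on the recurring `//lint:ignore SA6002` comments added in head.go and wal.go above: staticcheck's SA6002 check warns when a non-pointer value such as a slice is stored in a `sync.Pool`, because converting the slice header to `interface{}` allocates on every `Put`. The sketch below only illustrates that pattern and where the directive sits; the pool and function names here are invented for the example and are not part of this patch.

```go
package main

import (
	"fmt"
	"sync"
)

// bufPool mirrors the kind of []byte pool used by head.go and wal.go; the
// names in this sketch are illustrative only.
var bufPool = sync.Pool{
	New: func() interface{} { return make([]byte, 0, 1024) },
}

// getBuf hands out a pooled buffer with its length reset to zero.
func getBuf() []byte {
	return bufPool.Get().([]byte)[:0]
}

// putBuf returns the buffer to the pool.
func putBuf(b []byte) {
	// staticcheck SA6002: passing a slice (a non-pointer value) to Put boxes
	// the slice header into an interface{}, which allocates on every call.
	// The patch keeps this form and silences the check, because avoiding the
	// boxing has its own cost.
	//lint:ignore SA6002 keeping the slice form, as the patch does.
	bufPool.Put(b[:0])
}

func main() {
	b := getBuf()
	b = append(b, "hello"...)
	fmt.Println(string(b))
	putBuf(b)
}
```

The SA6002-clean alternative is to pool `*[]byte` so that only a pointer is boxed; that removes the per-`Put` allocation but adds indirection and bookkeeping at every call site, which is the performance trade-off the `//lint:ignore` comments refer to.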