Do not break exported function signatures (#6)

Signed-off-by: Marco Pracucci <marco@pracucci.com>
This commit is contained in:
Marco Pracucci 2021-08-20 18:37:47 +02:00 committed by GitHub
parent 481299f4a5
commit fbe211d3a8
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
12 changed files with 53 additions and 38 deletions

View file

@ -283,7 +283,12 @@ type Block struct {
// OpenBlock opens the block in the directory. It can be passed a chunk pool, which is used
// to instantiate chunk structs.
func OpenBlock(logger log.Logger, dir string, pool chunkenc.Pool, cache index.ReaderCacheProvider) (pb *Block, err error) {
func OpenBlock(logger log.Logger, dir string, pool chunkenc.Pool) (pb *Block, err error) {
return OpenBlockWithCache(logger, dir, pool, nil)
}
// OpenBlockWithCache is like OpenBlock but allows passing a cache provider.
func OpenBlockWithCache(logger log.Logger, dir string, pool chunkenc.Pool, cache index.ReaderCacheProvider) (pb *Block, err error) {
if logger == nil {
logger = log.NewNopLogger()
}
@ -304,7 +309,7 @@ func OpenBlock(logger log.Logger, dir string, pool chunkenc.Pool, cache index.Re
}
closers = append(closers, cr)
ir, err := index.NewFileReader(filepath.Join(dir, indexFilename), cache)
ir, err := index.NewFileReaderWithCache(filepath.Join(dir, indexFilename), cache)
if err != nil {
return nil, err
}

View file

@ -64,14 +64,14 @@ func TestSetCompactionFailed(t *testing.T) {
}()
blockDir := createBlock(t, tmpdir, genSeries(1, 1, 0, 1))
b, err := OpenBlock(nil, blockDir, nil, nil)
b, err := OpenBlock(nil, blockDir, nil)
require.NoError(t, err)
require.Equal(t, false, b.meta.Compaction.Failed)
require.NoError(t, b.setCompactionFailed())
require.Equal(t, true, b.meta.Compaction.Failed)
require.NoError(t, b.Close())
b, err = OpenBlock(nil, blockDir, nil, nil)
b, err = OpenBlock(nil, blockDir, nil)
require.NoError(t, err)
require.Equal(t, true, b.meta.Compaction.Failed)
require.NoError(t, b.Close())
@ -83,7 +83,7 @@ func TestCreateBlock(t *testing.T) {
defer func() {
require.NoError(t, os.RemoveAll(tmpdir))
}()
b, err := OpenBlock(nil, createBlock(t, tmpdir, genSeries(1, 1, 0, 10)), nil, nil)
b, err := OpenBlock(nil, createBlock(t, tmpdir, genSeries(1, 1, 0, 10)), nil)
if err == nil {
require.NoError(t, b.Close())
}
@ -193,7 +193,7 @@ func TestCorruptedChunk(t *testing.T) {
require.NoError(t, f.Close())
// Check open err.
b, err := OpenBlock(nil, blockDir, nil, nil)
b, err := OpenBlock(nil, blockDir, nil)
if tc.openErr != nil {
require.Equal(t, tc.openErr.Error(), err.Error())
return
@ -235,7 +235,7 @@ func TestLabelValuesWithMatchers(t *testing.T) {
require.Greater(t, len(files), 0, "No chunk created.")
// Check open err.
block, err := OpenBlock(nil, blockDir, nil, nil)
block, err := OpenBlock(nil, blockDir, nil)
require.NoError(t, err)
defer func() { require.NoError(t, block.Close()) }()
@ -303,7 +303,7 @@ func TestBlockSize(t *testing.T) {
// Create a block and compare the reported size vs actual disk size.
{
blockDirInit = createBlock(t, tmpdir, genSeries(10, 1, 1, 100))
blockInit, err = OpenBlock(nil, blockDirInit, nil, nil)
blockInit, err = OpenBlock(nil, blockDirInit, nil)
require.NoError(t, err)
defer func() {
require.NoError(t, blockInit.Close())
@ -327,7 +327,7 @@ func TestBlockSize(t *testing.T) {
require.NoError(t, err)
blockDirAfterCompact, err := c.Compact(tmpdir, []string{blockInit.Dir()}, nil)
require.NoError(t, err)
blockAfterCompact, err := OpenBlock(nil, filepath.Join(tmpdir, blockDirAfterCompact.String()), nil, nil)
blockAfterCompact, err := OpenBlock(nil, filepath.Join(tmpdir, blockDirAfterCompact.String()), nil)
require.NoError(t, err)
defer func() {
require.NoError(t, blockAfterCompact.Close())
@ -358,7 +358,7 @@ func TestReadIndexFormatV1(t *testing.T) {
*/
blockDir := filepath.Join("testdata", "index_format_v1")
block, err := OpenBlock(nil, blockDir, nil, nil)
block, err := OpenBlock(nil, blockDir, nil)
require.NoError(t, err)
q, err := NewBlockQuerier(block, 0, 1000)
@ -398,7 +398,7 @@ func BenchmarkLabelValuesWithMatchers(b *testing.B) {
require.Greater(b, len(files), 0, "No chunk created.")
// Check open err.
block, err := OpenBlock(nil, blockDir, nil, nil)
block, err := OpenBlock(nil, blockDir, nil)
require.NoError(b, err)
defer func() { require.NoError(b, block.Close()) }()
@ -452,7 +452,7 @@ func TestLabelNamesWithMatchers(t *testing.T) {
require.Greater(t, len(files), 0, "No chunk created.")
// Check open err.
block, err := OpenBlock(nil, blockDir, nil, nil)
block, err := OpenBlock(nil, blockDir, nil)
require.NoError(t, err)
t.Cleanup(func() { require.NoError(t, block.Close()) })

View file

@ -50,7 +50,7 @@ func TestBlockWriter(t *testing.T) {
// Confirm the block has the correct data.
blockpath := filepath.Join(outputDir, id.String())
b, err := OpenBlock(nil, blockpath, nil, nil)
b, err := OpenBlock(nil, blockpath, nil)
require.NoError(t, err)
defer func() { require.NoError(t, b.Close()) }()
q, err := NewBlockQuerier(b, math.MinInt64, math.MaxInt64)

View file

@ -418,7 +418,7 @@ func (c *LeveledCompactor) Compact(dest string, dirs []string, open []*Block) (u
if b == nil {
var err error
b, err = OpenBlock(c.logger, d, c.chunkPool, nil)
b, err = OpenBlock(c.logger, d, c.chunkPool)
if err != nil {
return uid, err
}

View file

@ -1056,7 +1056,7 @@ func BenchmarkCompaction(b *testing.B) {
blockDirs := make([]string, 0, len(c.ranges))
var blocks []*Block
for _, r := range c.ranges {
block, err := OpenBlock(nil, createBlock(b, dir, genSeries(nSeries, 10, r[0], r[1])), nil, nil)
block, err := OpenBlock(nil, createBlock(b, dir, genSeries(nSeries, 10, r[0], r[1])), nil)
require.NoError(b, err)
blocks = append(blocks, block)
defer func() {

View file

@ -1174,7 +1174,7 @@ func openBlocks(l log.Logger, dir string, loaded []*Block, chunkPool chunkenc.Po
cacheProvider = cache.GetBlockCacheProvider(meta.ULID.String())
}
block, err = OpenBlock(l, bDir, chunkPool, cacheProvider)
block, err = OpenBlockWithCache(l, bDir, chunkPool, cacheProvider)
if err != nil {
corrupted[meta.ULID] = err
continue

View file

@ -1168,7 +1168,7 @@ func TestTombstoneCleanFail(t *testing.T) {
totalBlocks := 2
for i := 0; i < totalBlocks; i++ {
blockDir := createBlock(t, db.Dir(), genSeries(1, 1, int64(i), int64(i)+1))
block, err := OpenBlock(nil, blockDir, nil, nil)
block, err := OpenBlock(nil, blockDir, nil)
require.NoError(t, err)
// Add some fake tombstones to trigger the compaction.
tomb := tombstones.NewMemTombstones()
@ -1218,7 +1218,7 @@ func TestTombstoneCleanRetentionLimitsRace(t *testing.T) {
// Generate some blocks with old mint (near epoch).
for j := 0; j < totalBlocks; j++ {
blockDir := createBlock(t, dbDir, genSeries(10, 1, int64(j), int64(j)+1))
block, err := OpenBlock(nil, blockDir, nil, nil)
block, err := OpenBlock(nil, blockDir, nil)
require.NoError(t, err)
// Cover block with tombstones so it can be deleted with CleanTombstones() as well.
tomb := tombstones.NewMemTombstones()
@ -1277,7 +1277,7 @@ func (c *mockCompactorFailing) Write(dest string, b BlockReader, mint, maxt int6
return ulid.ULID{}, fmt.Errorf("the compactor already did the maximum allowed blocks so it is time to fail")
}
block, err := OpenBlock(nil, createBlock(c.t, dest, genSeries(1, 1, 0, 1)), nil, nil)
block, err := OpenBlock(nil, createBlock(c.t, dest, genSeries(1, 1, 0, 1)), nil)
require.NoError(c.t, err)
require.NoError(c.t, block.Close()) // Close block as we won't be using anywhere.
c.blocks = append(c.blocks, block)

View file

@ -1099,12 +1099,22 @@ func (b realByteSlice) Sub(start, end int) ByteSlice {
// NewReader returns a new index reader on the given byte slice. It automatically
// handles different format versions.
func NewReader(b ByteSlice, cacheProvider ReaderCacheProvider) (*Reader, error) {
func NewReader(b ByteSlice) (*Reader, error) {
return newReader(b, ioutil.NopCloser(nil), nil)
}
// NewReaderWithCache is like NewReader but allows passing a cache provider.
func NewReaderWithCache(b ByteSlice, cacheProvider ReaderCacheProvider) (*Reader, error) {
return newReader(b, ioutil.NopCloser(nil), cacheProvider)
}
// NewFileReader returns a new index reader against the given index file.
func NewFileReader(path string, cacheProvider ReaderCacheProvider) (*Reader, error) {
func NewFileReader(path string) (*Reader, error) {
return NewFileReaderWithCache(path, nil)
}
// NewFileReaderWithCache is like NewFileReader but allows passing a cache provider.
func NewFileReaderWithCache(path string, cacheProvider ReaderCacheProvider) (*Reader, error) {
f, err := fileutil.OpenMmapFile(path)
if err != nil {
return nil, err

View file

@ -150,7 +150,7 @@ func TestIndexRW_Create_Open(t *testing.T) {
require.NoError(t, err)
require.NoError(t, iw.Close())
ir, err := NewFileReader(fn, nil)
ir, err := NewFileReader(fn)
require.NoError(t, err)
require.NoError(t, ir.Close())
@ -161,7 +161,7 @@ func TestIndexRW_Create_Open(t *testing.T) {
require.NoError(t, err)
f.Close()
_, err = NewFileReader(dir, nil)
_, err = NewFileReader(dir)
require.Error(t, err)
}
@ -200,7 +200,7 @@ func TestIndexRW_Postings(t *testing.T) {
require.NoError(t, iw.Close())
ir, err := NewFileReader(fn, nil)
ir, err := NewFileReader(fn)
require.NoError(t, err)
p, err := ir.Postings("a", "1")
@ -254,7 +254,7 @@ func TestIndexRW_Postings(t *testing.T) {
cache = hashcache.NewSeriesHashCache(1024 * 1024 * 1024).GetBlockCacheProvider("test")
}
ir, err := NewFileReader(fn, cache)
ir, err := NewFileReaderWithCache(fn, cache)
require.NoError(t, err)
// List all postings for a given label value. This is what we expect to get
@ -343,7 +343,7 @@ func TestPostingsMany(t *testing.T) {
}
require.NoError(t, iw.Close())
ir, err := NewFileReader(fn, nil)
ir, err := NewFileReader(fn)
require.NoError(t, err)
defer func() { require.NoError(t, ir.Close()) }()
@ -481,7 +481,7 @@ func TestPersistence_index_e2e(t *testing.T) {
err = iw.Close()
require.NoError(t, err)
ir, err := NewFileReader(filepath.Join(dir, indexFilename), nil)
ir, err := NewFileReader(filepath.Join(dir, indexFilename))
require.NoError(t, err)
for p := range mi.postings {
@ -553,7 +553,7 @@ func TestDecbufUvarintWithInvalidBuffer(t *testing.T) {
func TestReaderWithInvalidBuffer(t *testing.T) {
b := realByteSlice([]byte{0x81, 0x81, 0x81, 0x81, 0x81, 0x81})
_, err := NewReader(b, nil)
_, err := NewReader(b)
require.Error(t, err)
}
@ -565,7 +565,7 @@ func TestNewFileReaderErrorNoOpenFiles(t *testing.T) {
err := ioutil.WriteFile(idxName, []byte("corrupted contents"), 0666)
require.NoError(t, err)
_, err = NewFileReader(idxName, nil)
_, err = NewFileReader(idxName)
require.Error(t, err)
// dir.Close will fail on Win if idxName fd is not closed on error path.
@ -660,7 +660,7 @@ func BenchmarkReader_ShardedPostings(b *testing.B) {
}
// Create a reader to read back all postings from the index.
ir, err := NewFileReader(fn, cache)
ir, err := NewFileReaderWithCache(fn, cache)
require.NoError(b, err)
b.ResetTimer()

View file

@ -78,7 +78,7 @@ func BenchmarkPostingsForMatchers(b *testing.B) {
}()
blockdir := createBlockFromHead(b, tmpdir, h)
block, err := OpenBlock(nil, blockdir, nil, nil)
block, err := OpenBlock(nil, blockdir, nil)
require.NoError(b, err)
defer func() {
require.NoError(b, block.Close())
@ -220,7 +220,7 @@ func BenchmarkQuerierSelect(b *testing.B) {
seriesHashCache := hashcache.NewSeriesHashCache(1024 * 1024 * 1024)
blockdir := createBlockFromHead(b, tmpdir, h)
block, err := OpenBlock(nil, blockdir, nil, seriesHashCache.GetBlockCacheProvider("test"))
block, err := OpenBlockWithCache(nil, blockdir, nil, seriesHashCache.GetBlockCacheProvider("test"))
require.NoError(b, err)
defer func() {
require.NoError(b, block.Close())

View file

@ -1323,7 +1323,7 @@ func BenchmarkQueryIterator(b *testing.B) {
} else {
generatedSeries = populateSeries(prefilledLabels, mint, maxt)
}
block, err := OpenBlock(nil, createBlock(b, dir, generatedSeries), nil, nil)
block, err := OpenBlock(nil, createBlock(b, dir, generatedSeries), nil)
require.NoError(b, err)
blocks = append(blocks, block)
defer block.Close()
@ -1390,7 +1390,7 @@ func BenchmarkQuerySeek(b *testing.B) {
} else {
generatedSeries = populateSeries(prefilledLabels, mint, maxt)
}
block, err := OpenBlock(nil, createBlock(b, dir, generatedSeries), nil, nil)
block, err := OpenBlock(nil, createBlock(b, dir, generatedSeries), nil)
require.NoError(b, err)
blocks = append(blocks, block)
defer block.Close()
@ -1529,7 +1529,7 @@ func BenchmarkSetMatcher(b *testing.B) {
} else {
generatedSeries = populateSeries(prefilledLabels, mint, maxt)
}
block, err := OpenBlock(nil, createBlock(b, dir, generatedSeries), nil, nil)
block, err := OpenBlock(nil, createBlock(b, dir, generatedSeries), nil)
require.NoError(b, err)
blocks = append(blocks, block)
defer block.Close()
@ -1984,7 +1984,7 @@ func BenchmarkQueries(b *testing.B) {
qs := make([]storage.Querier, 0, 10)
for x := 0; x <= 10; x++ {
block, err := OpenBlock(nil, createBlock(b, dir, series), nil, nil)
block, err := OpenBlock(nil, createBlock(b, dir, series), nil)
require.NoError(b, err)
q, err := NewBlockQuerier(block, 1, int64(nSamples))
require.NoError(b, err)

View file

@ -81,7 +81,7 @@ func TestRepairBadIndexVersion(t *testing.T) {
require.NoError(t, os.MkdirAll(filepath.Join(tmpDbDir, "chunks"), 0777))
// Read current index to check integrity.
r, err := index.NewFileReader(filepath.Join(tmpDbDir, indexFilename), nil)
r, err := index.NewFileReader(filepath.Join(tmpDbDir, indexFilename))
require.NoError(t, err)
p, err := r.Postings("b", "1")
require.NoError(t, err)
@ -99,7 +99,7 @@ func TestRepairBadIndexVersion(t *testing.T) {
require.NoError(t, err)
db.Close()
r, err = index.NewFileReader(filepath.Join(tmpDbDir, indexFilename), nil)
r, err = index.NewFileReader(filepath.Join(tmpDbDir, indexFilename))
require.NoError(t, err)
defer r.Close()
p, err = r.Postings("b", "1")