// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package chunks

import (
	"encoding/binary"
	"errors"
	"io/ioutil"
	"math/rand"
	"os"
	"strconv"
	"testing"

	"github.com/stretchr/testify/require"

	"github.com/prometheus/prometheus/tsdb/chunkenc"
)
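
// TestChunkDiskMapper_WriteChunk_Chunk_IterateChunks writes chunks until
// several head chunk files have been cut, verifies the on-disk bytes of the
// first file, reads every chunk back via Chunk, and replays all of them via
// IterateAllChunks after a restart.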
func TestChunkDiskMapper_WriteChunk_Chunk_IterateChunks(t *testing.T) {
	hrw := testChunkDiskMapper(t)
	defer func() {
		require.NoError(t, hrw.Close())
	}()

	expectedBytes := []byte{}
	nextChunkOffset := uint64(HeadChunkFileHeaderSize)
	chkCRC32 := newCRC32()

	type expectedDataType struct {
		seriesRef, chunkRef uint64
		mint, maxt          int64
		numSamples          uint16
		chunk               chunkenc.Chunk
	}
	expectedData := []expectedDataType{}

	var buf [MaxHeadChunkMetaSize]byte
	totalChunks := 0
	var firstFileName string
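	// Write chunks until at least 3 files have been cut and some chunks are
	// still pending in the in-memory write buffer.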
	for hrw.curFileSequence < 3 || hrw.chkWriter.Buffered() == 0 {
		addChunks := func(numChunks int) {
			for i := 0; i < numChunks; i++ {
				seriesRef, chkRef, mint, maxt, chunk := createChunk(t, totalChunks, hrw)
				totalChunks++
				expectedData = append(expectedData, expectedDataType{
					seriesRef:  seriesRef,
					mint:       mint,
					maxt:       maxt,
					chunkRef:   chkRef,
					chunk:      chunk,
					numSamples: uint16(chunk.NumSamples()),
				})

				if hrw.curFileSequence != 1 {
					// We are checking for bytes written only for the first file.
					continue
				}

				// Calculating expected bytes written on disk for first file.
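				// On disk, each chunk written below is laid out as:
				// | series ref <8B> | mint <8B> | maxt <8B> | encoding <1B> | data len <uvarint> | chunk data | CRC32 <4B> |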
				firstFileName = hrw.curFile.Name()
				require.Equal(t, chunkRef(1, nextChunkOffset), chkRef)

				bytesWritten := 0
				chkCRC32.Reset()

				binary.BigEndian.PutUint64(buf[bytesWritten:], seriesRef)
				bytesWritten += SeriesRefSize
				binary.BigEndian.PutUint64(buf[bytesWritten:], uint64(mint))
				bytesWritten += MintMaxtSize
				binary.BigEndian.PutUint64(buf[bytesWritten:], uint64(maxt))
				bytesWritten += MintMaxtSize
				buf[bytesWritten] = byte(chunk.Encoding())
				bytesWritten += ChunkEncodingSize
				n := binary.PutUvarint(buf[bytesWritten:], uint64(len(chunk.Bytes())))
				bytesWritten += n

				expectedBytes = append(expectedBytes, buf[:bytesWritten]...)
				_, err := chkCRC32.Write(buf[:bytesWritten])
				require.NoError(t, err)
				expectedBytes = append(expectedBytes, chunk.Bytes()...)
				_, err = chkCRC32.Write(chunk.Bytes())
				require.NoError(t, err)

				expectedBytes = append(expectedBytes, chkCRC32.Sum(nil)...)

				// += seriesRef, mint, maxt, encoding, chunk data len, chunk data, CRC.
				nextChunkOffset += SeriesRefSize + 2*MintMaxtSize + ChunkEncodingSize + uint64(n) + uint64(len(chunk.Bytes())) + CRCSize
			}
		}
		addChunks(100)
		require.NoError(t, hrw.CutNewFile())
		addChunks(10) // For chunks in the in-memory buffer.
	}

	// Checking on-disk bytes for the first file.
	require.Equal(t, 3, len(hrw.mmappedChunkFiles), "expected 3 mmapped files, got %d", len(hrw.mmappedChunkFiles))
	require.Equal(t, len(hrw.mmappedChunkFiles), len(hrw.closers))

	actualBytes, err := ioutil.ReadFile(firstFileName)
	require.NoError(t, err)

	// Check header of the segment file.
	require.Equal(t, MagicHeadChunks, int(binary.BigEndian.Uint32(actualBytes[0:MagicChunksSize])))
	require.Equal(t, chunksFormatV1, int(actualBytes[MagicChunksSize]))

	// Remaining chunk data.
	fileEnd := HeadChunkFileHeaderSize + len(expectedBytes)
	require.Equal(t, expectedBytes, actualBytes[HeadChunkFileHeaderSize:fileEnd])

	// Test for the next chunk header to be all 0s. That marks the end of the file.
	for _, b := range actualBytes[fileEnd : fileEnd+MaxHeadChunkMetaSize] {
		require.Equal(t, byte(0), b)
	}

	// Testing reading of chunks.
	for _, exp := range expectedData {
		actChunk, err := hrw.Chunk(exp.chunkRef)
		require.NoError(t, err)
		require.Equal(t, exp.chunk.Bytes(), actChunk.Bytes())
	}

	// Testing IterateAllChunks method.
	dir := hrw.dir.Name()
	require.NoError(t, hrw.Close())
	hrw, err = NewChunkDiskMapper(dir, chunkenc.NewPool(), DefaultWriteBufferSize)
	require.NoError(t, err)

	idx := 0
	err = hrw.IterateAllChunks(func(seriesRef, chunkRef uint64, mint, maxt int64, numSamples uint16) error {
		t.Helper()

		expData := expectedData[idx]
		require.Equal(t, expData.seriesRef, seriesRef)
		require.Equal(t, expData.chunkRef, chunkRef)
		require.Equal(t, expData.mint, mint)
		require.Equal(t, expData.maxt, maxt)
		require.Equal(t, expData.numSamples, numSamples)

		actChunk, err := hrw.Chunk(expData.chunkRef)
		require.NoError(t, err)
		require.Equal(t, expData.chunk.Bytes(), actChunk.Bytes())

		idx++
		return nil
	})
	require.NoError(t, err)
	require.Equal(t, len(expectedData), idx)
}

// TestChunkDiskMapper_Truncate tests
// * If truncation is happening properly based on the time passed.
// * The active file is not deleted even if the passed time makes it eligible to be deleted.
// * Empty current file does not lead to creation of another file after truncation.
// * Non-empty current file leads to creation of another file after truncation.
func TestChunkDiskMapper_Truncate(t *testing.T) {
	hrw := testChunkDiskMapper(t)
	defer func() {
		require.NoError(t, hrw.Close())
	}()

	timeRange := 0
	fileTimeStep := 100
	var thirdFileMinT, sixthFileMinT int64
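	// thirdFileMinT and sixthFileMinT record the mint of the 3rd and 6th files
	// to serve as truncation points later in the test.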

	addChunk := func() int {
		mint := timeRange + 1                // Just after the new file cut.
		maxt := timeRange + fileTimeStep - 1 // Just before the next file.

		// Write a chunk to set the maxt for the segment.
		_, err := hrw.WriteChunk(1, int64(mint), int64(maxt), randomChunk(t))
		require.NoError(t, err)

		timeRange += fileTimeStep

		return mint
	}

	verifyFiles := func(remainingFiles []int) {
		t.Helper()

		files, err := ioutil.ReadDir(hrw.dir.Name())
		require.NoError(t, err)
		require.Equal(t, len(remainingFiles), len(files), "files on disk")
		require.Equal(t, len(remainingFiles), len(hrw.mmappedChunkFiles), "hrw.mmappedChunkFiles")
		require.Equal(t, len(remainingFiles), len(hrw.closers), "closers")

		for _, i := range remainingFiles {
			_, ok := hrw.mmappedChunkFiles[i]
			require.True(t, ok, "remaining file %d not in hrw.mmappedChunkFiles", i)
		}
	}

	// Create segments 1 to 7.
	for i := 1; i <= 7; i++ {
		require.NoError(t, hrw.CutNewFile())
		mint := int64(addChunk())
		if i == 3 {
			thirdFileMinT = mint
		} else if i == 6 {
			sixthFileMinT = mint
		}
	}
	verifyFiles([]int{1, 2, 3, 4, 5, 6, 7})

	// Truncating files.
	require.NoError(t, hrw.Truncate(thirdFileMinT))
	verifyFiles([]int{3, 4, 5, 6, 7, 8})

	dir := hrw.dir.Name()
	require.NoError(t, hrw.Close())

	// Restarted.
	var err error
	hrw, err = NewChunkDiskMapper(dir, chunkenc.NewPool(), DefaultWriteBufferSize)
	require.NoError(t, err)

	require.False(t, hrw.fileMaxtSet)
	require.NoError(t, hrw.IterateAllChunks(func(_, _ uint64, _, _ int64, _ uint16) error { return nil }))
	require.True(t, hrw.fileMaxtSet)

	verifyFiles([]int{3, 4, 5, 6, 7, 8})
	// New file is created after restart even if last file was empty.
	addChunk()
	verifyFiles([]int{3, 4, 5, 6, 7, 8, 9})

	// Truncating files after restart.
	require.NoError(t, hrw.Truncate(sixthFileMinT))
	verifyFiles([]int{6, 7, 8, 9, 10})

	// As the last file was empty, this creates no new files.
	require.NoError(t, hrw.Truncate(sixthFileMinT+1))
	verifyFiles([]int{6, 7, 8, 9, 10})
	addChunk()

	// Truncating till current time should not delete the current active file.
	require.NoError(t, hrw.Truncate(int64(timeRange+(2*fileTimeStep))))
	verifyFiles([]int{10, 11}) // One file is the previously active file and one currently created.
}

// TestChunkDiskMapper_Truncate_PreservesFileSequence tests that truncation doesn't poke
// holes into the file sequence, even if there are empty files in between non-empty files.
// This test exposes https://github.com/prometheus/prometheus/issues/7412 where the truncation
// simply deleted all empty files instead of stopping once it encountered a non-empty file.
func TestChunkDiskMapper_Truncate_PreservesFileSequence(t *testing.T) {
	hrw := testChunkDiskMapper(t)
	defer func() {
		require.NoError(t, hrw.Close())
	}()

	timeRange := 0
	addChunk := func() {
		step := 100
		mint, maxt := timeRange+1, timeRange+step-1
		_, err := hrw.WriteChunk(1, int64(mint), int64(maxt), randomChunk(t))
		require.NoError(t, err)
		timeRange += step
	}
	emptyFile := func() {
		require.NoError(t, hrw.CutNewFile())
	}
	nonEmptyFile := func() {
		emptyFile()
		addChunk()
	}

	addChunk()     // 1. Created with the first chunk.
	nonEmptyFile() // 2.
	nonEmptyFile() // 3.
	emptyFile()    // 4.
	nonEmptyFile() // 5.
	emptyFile()    // 6.
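
	// At this point files 1, 2, 3 and 5 contain chunks while files 4 and 6 are empty.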
	verifyFiles := func(remainingFiles []int) {
		t.Helper()

		files, err := ioutil.ReadDir(hrw.dir.Name())
		require.NoError(t, err)
		require.Equal(t, len(remainingFiles), len(files), "files on disk")
		require.Equal(t, len(remainingFiles), len(hrw.mmappedChunkFiles), "hrw.mmappedChunkFiles")
		require.Equal(t, len(remainingFiles), len(hrw.closers), "closers")

		for _, i := range remainingFiles {
			_, ok := hrw.mmappedChunkFiles[i]
			require.True(t, ok, "remaining file %d not in hrw.mmappedChunkFiles", i)
		}
	}

	verifyFiles([]int{1, 2, 3, 4, 5, 6})

	// Truncating files till 2. It should not delete anything after 3 (inclusive)
	// though files 4 and 6 are empty.
	file2Maxt := hrw.mmappedChunkFiles[2].maxt
	require.NoError(t, hrw.Truncate(file2Maxt+1))
	// As 6 was empty, it should not create another file.
	verifyFiles([]int{3, 4, 5, 6})

	addChunk()
	// Truncate creates another file as 6 is not empty now.
	require.NoError(t, hrw.Truncate(file2Maxt+1))
	verifyFiles([]int{3, 4, 5, 6, 7})

	dir := hrw.dir.Name()
	require.NoError(t, hrw.Close())

	// Restarting checks for unsequential files.
	var err error
	hrw, err = NewChunkDiskMapper(dir, chunkenc.NewPool(), DefaultWriteBufferSize)
	require.NoError(t, err)
	verifyFiles([]int{3, 4, 5, 6, 7})
}

// TestHeadReadWriter_TruncateAfterFailedIterateChunks tests for
// https://github.com/prometheus/prometheus/issues/7753
func TestHeadReadWriter_TruncateAfterFailedIterateChunks(t *testing.T) {
	hrw := testChunkDiskMapper(t)
	defer func() {
		require.NoError(t, hrw.Close())
	}()

	// Write a chunk to iterate over later.
	_, err := hrw.WriteChunk(1, 0, 1000, randomChunk(t))
	require.NoError(t, err)

	dir := hrw.dir.Name()
	require.NoError(t, hrw.Close())

	// Restarting to recreate https://github.com/prometheus/prometheus/issues/7753.
	hrw, err = NewChunkDiskMapper(dir, chunkenc.NewPool(), DefaultWriteBufferSize)
	require.NoError(t, err)

	// Forcefully failing IterateAllChunks.
	require.Error(t, hrw.IterateAllChunks(func(_, _ uint64, _, _ int64, _ uint16) error {
		return errors.New("random error")
	}))

	// Truncation call should not return an error after IterateAllChunks fails.
	require.NoError(t, hrw.Truncate(2000))
}
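
// TestHeadReadWriter_ReadRepairOnEmptyLastFile tests that an empty last head
// chunk file, as left behind by an abrupt shutdown during file creation, is
// removed both from memory and from disk when the mapper is reopened.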
func TestHeadReadWriter_ReadRepairOnEmptyLastFile(t *testing.T) {
	hrw := testChunkDiskMapper(t)
	defer func() {
		require.NoError(t, hrw.Close())
	}()

	timeRange := 0
	addChunk := func() {
		step := 100
		mint, maxt := timeRange+1, timeRange+step-1
		_, err := hrw.WriteChunk(1, int64(mint), int64(maxt), randomChunk(t))
		require.NoError(t, err)
		timeRange += step
	}
	nonEmptyFile := func() {
		require.NoError(t, hrw.CutNewFile())
		addChunk()
	}

	addChunk()     // 1. Created with the first chunk.
	nonEmptyFile() // 2.
	nonEmptyFile() // 3.

	require.Equal(t, 3, len(hrw.mmappedChunkFiles))
	lastFile := 0
	for idx := range hrw.mmappedChunkFiles {
		if idx > lastFile {
			lastFile = idx
		}
	}
	require.Equal(t, 3, lastFile)
	dir := hrw.dir.Name()
	require.NoError(t, hrw.Close())

	// Write an empty last file mimicking an abrupt shutdown on file creation.
	emptyFileName := segmentFile(dir, lastFile+1)
	f, err := os.OpenFile(emptyFileName, os.O_WRONLY|os.O_CREATE, 0666)
	require.NoError(t, err)
	require.NoError(t, f.Sync())
	stat, err := f.Stat()
	require.NoError(t, err)
	require.Equal(t, int64(0), stat.Size())
	require.NoError(t, f.Close())

	// Open the chunk disk mapper again; the corrupt file should be removed.
	hrw, err = NewChunkDiskMapper(dir, chunkenc.NewPool(), DefaultWriteBufferSize)
	require.NoError(t, err)
	require.False(t, hrw.fileMaxtSet)
	require.NoError(t, hrw.IterateAllChunks(func(_, _ uint64, _, _ int64, _ uint16) error { return nil }))
	require.True(t, hrw.fileMaxtSet)

	// Removed from memory.
	require.Equal(t, 3, len(hrw.mmappedChunkFiles))
	for idx := range hrw.mmappedChunkFiles {
		require.LessOrEqual(t, idx, lastFile, "file index is bigger than previous last file")
	}

	// Removed even from disk.
	files, err := ioutil.ReadDir(dir)
	require.NoError(t, err)
	require.Equal(t, 3, len(files))
	for _, fi := range files {
		seq, err := strconv.ParseUint(fi.Name(), 10, 64)
		require.NoError(t, err)
		require.LessOrEqual(t, seq, uint64(lastFile), "file index on disk is bigger than previous last file")
	}
}
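
// testChunkDiskMapper opens a ChunkDiskMapper on a temporary directory that is
// removed when the test ends, and runs an initial IterateAllChunks pass so
// that fileMaxtSet is true before the test body starts writing.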
func testChunkDiskMapper(t *testing.T) *ChunkDiskMapper {
	tmpdir, err := ioutil.TempDir("", "data")
	require.NoError(t, err)
	t.Cleanup(func() {
		require.NoError(t, os.RemoveAll(tmpdir))
	})

	hrw, err := NewChunkDiskMapper(tmpdir, chunkenc.NewPool(), DefaultWriteBufferSize)
	require.NoError(t, err)
	require.False(t, hrw.fileMaxtSet)
	require.NoError(t, hrw.IterateAllChunks(func(_, _ uint64, _, _ int64, _ uint16) error { return nil }))
	require.True(t, hrw.fileMaxtSet)
	return hrw
}
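
// randomChunk returns an XOR chunk holding up to 119 random samples.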
func randomChunk(t *testing.T) chunkenc.Chunk {
	chunk := chunkenc.NewXORChunk()
	numSamples := rand.Int() % 120
	app, err := chunk.Appender()
	require.NoError(t, err)
	for i := 0; i < numSamples; i++ {
		app.Append(rand.Int63(), rand.Float64())
	}
	return chunk
}
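
// createChunk writes a random chunk for a random series ref, with mint/maxt
// derived from idx, and returns everything needed to verify the write.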
func createChunk(t *testing.T, idx int, hrw *ChunkDiskMapper) (seriesRef uint64, chunkRef uint64, mint, maxt int64, chunk chunkenc.Chunk) {
	var err error
	seriesRef = uint64(rand.Int63())
	mint = int64(idx*1000 + 1)
	maxt = int64((idx + 1) * 1000)
	chunk = randomChunk(t)
	chunkRef, err = hrw.WriteChunk(seriesRef, mint, maxt, chunk)
	require.NoError(t, err)
	return
}