use a db copy instead of creating it again.

Signed-off-by: Krasi Georgiev <kgeorgie@redhat.com>
This commit is contained in:
Krasi Georgiev 2019-01-24 14:15:32 +02:00
parent 8ffd705346
commit 1b0d85bbf2

View file

@@ -15,7 +15,6 @@ package tsdb
import (
"context"
"fmt"
"io/ioutil"
"math"
"os"
@@ -28,6 +27,7 @@ import (
prom_testutil "github.com/prometheus/client_golang/prometheus/testutil"
dto "github.com/prometheus/client_model/go"
"github.com/prometheus/tsdb/chunks"
"github.com/prometheus/tsdb/fileutil"
"github.com/prometheus/tsdb/labels"
"github.com/prometheus/tsdb/testutil"
)
@@ -748,70 +748,76 @@ func TestDisableAutoCompactions(t *testing.T) {
// TestCancelCompactions ensures that when the db is closed
// any running compaction is cancelled to unblock closing the db.
func TestCancelCompactions(t *testing.T) {
createTestDb := func() (*DB, func()) {
tmpdir, err := ioutil.TempDir("", "testCancelCompaction")
testutil.Ok(t, err)
tmpdir, err := ioutil.TempDir("", "testCancelCompaction")
testutil.Ok(t, err)
defer os.RemoveAll(tmpdir)
// Create some blocks to fall within the compaction range.
createBlock(t, tmpdir, 10000, 0, 1000)
createBlock(t, tmpdir, 10000, 1000, 2000)
createBlock(t, tmpdir, 1, 2000, 2001) // The most recent block is ignored so can be e small one.
db, err := Open(tmpdir, log.NewNopLogger(), nil, &Options{BlockRanges: []int64{1, 2000}})
testutil.Ok(t, err)
testutil.Equals(t, 3, len(db.Blocks()), "initial block count mismatch")
testutil.Equals(t, 0.0, prom_testutil.ToFloat64(db.compactor.(*LeveledCompactor).metrics.ran), "initial compaction counter mismatch")
// Create some blocks to fall within the compaction range.
createBlock(t, tmpdir, 7000, 0, 1000)
createBlock(t, tmpdir, 7000, 1000, 2000)
createBlock(t, tmpdir, 1, 2000, 2001) // The most recent block is ignored so can be e small one.
return db, func() {
os.RemoveAll(tmpdir)
}
}
// First lets mesure the compaction time without interupting it.
// Copy the db so we have an exact copy to compare compaction times.
tmpdirCopy := tmpdir + "Copy"
err = fileutil.CopyDirs(tmpdir, tmpdirCopy)
testutil.Ok(t, err)
defer os.RemoveAll(tmpdirCopy)
// Measure the compaction time without interupting it.
var timeCompactionUninterrupted time.Duration
{
db, delete := createTestDb()
defer delete()
db.compactc <- struct{}{} // Trigger a compaction.
var start time.Time
for {
if prom_testutil.ToFloat64(db.compactor.(*LeveledCompactor).metrics.populatingBlocks) > 0 {
start = time.Now()
break
}
time.Sleep(3 * time.Millisecond)
}
for {
if prom_testutil.ToFloat64(db.compactor.(*LeveledCompactor).metrics.ran) == 1 {
timeCompactionUninterrupted = time.Since(start)
break
}
time.Sleep(3 * time.Millisecond)
}
}
// Closing the db in the middle of compaction should take half the time.
{
db, delete := createTestDb()
defer delete()
db.compactc <- struct{}{} // Trigger a compaction.
dbClosed := make(chan struct{})
for {
if prom_testutil.ToFloat64(db.compactor.(*LeveledCompactor).metrics.populatingBlocks) > 0 {
db, err := Open(tmpdir, log.NewNopLogger(), nil, &Options{BlockRanges: []int64{1, 2000}})
testutil.Ok(t, err)
db.DisableCompactions()
testutil.Equals(t, 3, len(db.Blocks()), "initial block count mismatch")
testutil.Equals(t, 0.0, prom_testutil.ToFloat64(db.compactor.(*LeveledCompactor).metrics.ran), "initial compaction counter mismatch")
go func() {
var start time.Time
for {
if prom_testutil.ToFloat64(db.compactor.(*LeveledCompactor).metrics.populatingBlocks) > 0 {
start = time.Now()
break
}
time.Sleep(3 * time.Millisecond)
go func() {
testutil.Ok(t, db.Close())
close(dbClosed)
}()
break
}
}
start := time.Now()
<-dbClosed
actT := time.Since(start)
fmt.Println(timeCompactionUninterrupted)
fmt.Println(actT)
expT := time.Duration(timeCompactionUninterrupted / 2)
testutil.Assert(t, actT < expT, "closing the db took more than expected. exp: <%v, act: %v", expT, actT)
for {
if prom_testutil.ToFloat64(db.compactor.(*LeveledCompactor).metrics.ran) == 1 {
timeCompactionUninterrupted = time.Since(start)
break
}
time.Sleep(3 * time.Millisecond)
}
testutil.Ok(t, db.Close())
}()
db.compact()
}
// Measure the compaction time when closing the db in the middle of compaction.
{
db, err := Open(tmpdirCopy, log.NewNopLogger(), nil, &Options{BlockRanges: []int64{1, 2000}})
testutil.Ok(t, err)
db.DisableCompactions()
testutil.Equals(t, 3, len(db.Blocks()), "initial block count mismatch")
testutil.Equals(t, 0.0, prom_testutil.ToFloat64(db.compactor.(*LeveledCompactor).metrics.ran), "initial compaction counter mismatch")
go func() {
dbClosed := make(chan struct{})
for {
if prom_testutil.ToFloat64(db.compactor.(*LeveledCompactor).metrics.populatingBlocks) > 0 {
time.Sleep(3 * time.Millisecond)
go func() {
testutil.Ok(t, db.Close())
close(dbClosed)
}()
break
}
}
start := time.Now()
<-dbClosed
actT := time.Since(start)
expT := time.Duration(timeCompactionUninterrupted / 2) // Closing the db in the middle of compaction should less than half the time.
testutil.Assert(t, actT < expT, "closing the db took more than expected. exp: <%v, act: %v", expT, actT)
}()
db.compact()
}
}