Merge pull request #250 from prometheus/refactor/drop-unused-storage-setting
Drop unused writeMemoryInterval
This commit is contained in:

commit 516101f015

main.go (2 changes)
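In short, the commit removes the writeMemoryInterval parameter from the TieredStorage constructor, together with the corresponding struct field and the ticker it drove in Serve(). For reference, the constructor signature before and after, as it appears in the hunks below:

// Before this commit:
func NewTieredStorage(appendToDiskQueueDepth, viewQueueDepth uint, flushMemoryInterval, writeMemoryInterval, memoryTTL time.Duration, root string) (storage *TieredStorage, err error)

// After this commit:
func NewTieredStorage(appendToDiskQueueDepth, viewQueueDepth uint, flushMemoryInterval, memoryTTL time.Duration, root string) (storage *TieredStorage, err error)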
main.go

@@ -187,7 +187,7 @@ func main() {
 		log.Fatalf("Error loading configuration from %s: %v", *configFile, err)
 	}
 
-	ts, err := metric.NewTieredStorage(uint(*diskAppendQueueCapacity), 100, time.Second*30, time.Second*1, time.Second*20, *metricsStoragePath)
+	ts, err := metric.NewTieredStorage(uint(*diskAppendQueueCapacity), 100, time.Second*30, time.Second*20, *metricsStoragePath)
 	if err != nil {
 		log.Fatalf("Error opening storage: %s", err)
 	}
@@ -64,7 +64,7 @@ func (t testTieredStorageCloser) Close() {
 func NewTestTieredStorage(t test.Tester) (storage *metric.TieredStorage, closer test.Closer) {
 	var directory test.TemporaryDirectory
 	directory = test.NewTemporaryDirectory("test_tiered_storage", t)
-	storage, err := metric.NewTieredStorage(2500, 1000, 5*time.Second, 15*time.Second, 0*time.Second, directory.Path())
+	storage, err := metric.NewTieredStorage(2500, 1000, 5*time.Second, 0*time.Second, directory.Path())
 
 	if err != nil {
 		if storage != nil {
@@ -86,7 +86,7 @@ func (t testTieredStorageCloser) Close() {
 func NewTestTieredStorage(t test.Tester) (storage *TieredStorage, closer test.Closer) {
 	var directory test.TemporaryDirectory
 	directory = test.NewTemporaryDirectory("test_tiered_storage", t)
-	storage, err := NewTieredStorage(2500, 1000, 5*time.Second, 15*time.Second, 0*time.Second, directory.Path())
+	storage, err := NewTieredStorage(2500, 1000, 5*time.Second, 0*time.Second, directory.Path())
 
 	if err != nil {
 		if storage != nil {
@@ -64,7 +64,6 @@ type TieredStorage struct {
 	memoryArena         memorySeriesStorage
 	memoryTTL           time.Duration
 	flushMemoryInterval time.Duration
-	writeMemoryInterval time.Duration
 
 	// This mutex manages any concurrent reads/writes of the memoryArena.
 	memoryMutex sync.RWMutex
@@ -88,7 +87,7 @@ type viewJob struct {
 	err chan error
 }
 
-func NewTieredStorage(appendToDiskQueueDepth, viewQueueDepth uint, flushMemoryInterval, writeMemoryInterval, memoryTTL time.Duration, root string) (storage *TieredStorage, err error) {
+func NewTieredStorage(appendToDiskQueueDepth, viewQueueDepth uint, flushMemoryInterval, memoryTTL time.Duration, root string) (storage *TieredStorage, err error) {
 	diskStorage, err := NewLevelDBMetricPersistence(root)
 	if err != nil {
 		return
@@ -102,7 +101,6 @@ func NewTieredStorage(appendToDiskQueueDepth, viewQueueDepth uint, flushMemoryIn
 		memoryArena:         NewMemorySeriesStorage(),
 		memoryTTL:           memoryTTL,
 		viewQueue:           make(chan viewJob, viewQueueDepth),
-		writeMemoryInterval: writeMemoryInterval,
 	}
 	return
 }
@@ -185,8 +183,6 @@ func (t *TieredStorage) rebuildDiskFrontier(i leveldb.Iterator) (err error) {
 func (t *TieredStorage) Serve() {
 	flushMemoryTicker := time.NewTicker(t.flushMemoryInterval)
 	defer flushMemoryTicker.Stop()
-	writeMemoryTicker := time.NewTicker(t.writeMemoryInterval)
-	defer writeMemoryTicker.Stop()
 	queueReportTicker := time.NewTicker(time.Second)
 	defer queueReportTicker.Stop()
 
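For callers updating to the new signature, here is a minimal sketch of constructing the storage with the five remaining arguments. The import path, argument values, and goroutine usage are illustrative assumptions based on the hunks above, not part of this commit:

package main

import (
	"log"
	"time"

	// Assumed import path for the storage/metric package in this tree.
	"github.com/prometheus/prometheus/storage/metric"
)

func main() {
	// Remaining arguments: append-to-disk queue depth, view queue depth,
	// flush-memory interval, memory TTL, and the storage root path.
	// The values here are illustrative, mirroring the hunks above.
	ts, err := metric.NewTieredStorage(2500, 100, 30*time.Second, 20*time.Second, "/tmp/metrics")
	if err != nil {
		log.Fatalf("Error opening storage: %s", err)
	}

	// Serve runs the storage's internal tickers (memory flush and queue
	// reporting); it would typically be started in its own goroutine.
	go ts.Serve()
}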