// Copyright 2014 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package local

import (
	"bufio"
	"encoding/binary"
	"fmt"
	"io"
	"math"
	"os"
	"path"
	"path/filepath"
	"strings"
	"sync"
	"sync/atomic"
	"time"

	"github.com/golang/glog"
	"github.com/prometheus/client_golang/prometheus"

	clientmodel "github.com/prometheus/client_golang/model"

	"github.com/prometheus/prometheus/storage/local/codable"
	"github.com/prometheus/prometheus/storage/local/flock"
	"github.com/prometheus/prometheus/storage/local/index"
	"github.com/prometheus/prometheus/storage/metric"
)

const (
	seriesFileSuffix     = ".db"
	seriesTempFileSuffix = ".db.tmp"
	seriesDirNameLen     = 2 // How many bytes of the fingerprint in dir name.

	headsFileName      = "heads.db"
	headsTempFileName  = "heads.db.tmp"
	headsFormatVersion = 1
	headsMagicString   = "PrometheusHeads"

	dirtyFileName = "DIRTY"

	fileBufSize = 1 << 16 // 64kiB.

	chunkHeaderLen             = 17
	chunkHeaderTypeOffset      = 0
	chunkHeaderFirstTimeOffset = 1
	chunkHeaderLastTimeOffset  = 9

	indexingMaxBatchSize  = 1024 * 1024
	indexingBatchTimeout  = 500 * time.Millisecond // Commit batch when idle for that long.
	indexingQueueCapacity = 1024 * 16
)
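
// On-disk layout of one chunk entry in a series file, as implied by the
// offsets above and by persistChunk/loadChunkDescs below:
//
//	byte 0             : chunk type (chunkHeaderTypeOffset)
//	bytes 1 through 8  : first sample time, little-endian uint64 (chunkHeaderFirstTimeOffset)
//	bytes 9 through 16 : last sample time, little-endian uint64 (chunkHeaderLastTimeOffset)
//	bytes 17 onwards   : the marshaled chunk data, chunkLen bytes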

var fpLen = len(clientmodel.Fingerprint(0).String()) // Length of a fingerprint as string.
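
// Series files are spread over one directory per fingerprint prefix: the first
// seriesDirNameLen characters of the fingerprint's string form name the
// directory, the remaining characters plus seriesFileSuffix name the file. For
// example, a fingerprint rendered as "abcd1234deadbeef" would be stored at
// <basePath>/ab/cd1234deadbeef.db. (Illustrative fingerprint only; see
// sanitizeSeries for how such a name is parsed back.)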

const (
	flagHeadChunkPersisted byte = 1 << iota
	// Add more flags here like:
	// flagFoo
	// flagBar
)

type indexingOpType byte

const (
	add indexingOpType = iota
	remove
)

type indexingOp struct {
	fingerprint clientmodel.Fingerprint
	metric      clientmodel.Metric
	opType      indexingOpType
}
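
// indexingOps are enqueued by indexMetric and unindexMetric and consumed by
// the processIndexingQueue goroutine started in newPersistence, which batches
// them (bounded by indexingMaxBatchSize and indexingBatchTimeout) before
// applying them to the label indexes.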

// A persistence is used by a Storage implementation to store samples
// persistently across restarts. The methods are only goroutine-safe if
// explicitly marked as such below. The chunk-related methods persistChunk,
// dropChunks, loadChunks, and loadChunkDescs can be called concurrently with
// each other if each call refers to a different fingerprint.
type persistence struct {
	basePath string
	chunkLen int

	archivedFingerprintToMetrics   *index.FingerprintMetricIndex
	archivedFingerprintToTimeRange *index.FingerprintTimeRangeIndex
	labelPairToFingerprints        *index.LabelPairFingerprintIndex
	labelNameToLabelValues         *index.LabelNameLabelValuesIndex

	indexingQueue   chan indexingOp
	indexingStopped chan struct{}
	indexingFlush   chan chan int

	indexingQueueLength   prometheus.Gauge
	indexingQueueCapacity prometheus.Metric
	indexingBatchSizes    prometheus.Summary
	indexingBatchLatency  prometheus.Summary
	checkpointDuration    prometheus.Gauge

	dirtyMtx      sync.Mutex     // Protects dirty and becameDirty.
	dirty         bool           // true if persistence was started in dirty state.
	becameDirty   bool           // true if an inconsistency came up during runtime.
	dirtyFileName string         // The file used for locking and to mark dirty state.
	fLock         flock.Releaser // The file lock to protect against concurrent usage.
}
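
// Rough sketch of how a Storage implementation is expected to drive a
// persistence (assumed usage based on the methods defined in this file; the
// actual driver lives elsewhere in this package, and "locker" below is a
// hypothetical *fingerprintLocker):
//
//	p, err := newPersistence(basePath, chunkLen, false)
//	// handle err
//	sm, err := p.loadSeriesMapAndHeads() // runs crash recovery if dirty
//	// ... persistChunk / dropChunks per fingerprint as samples arrive ...
//	p.waitForIndexing()                             // flush queued index ops
//	err = p.checkpointSeriesMapAndHeads(sm, locker) // periodic checkpoints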

// newPersistence returns a newly allocated persistence backed by local disk storage, ready to use.
func newPersistence(basePath string, chunkLen int, dirty bool) (*persistence, error) {
	if err := os.MkdirAll(basePath, 0700); err != nil {
		return nil, err
	}
	dirtyPath := filepath.Join(basePath, dirtyFileName)

	fLock, dirtyfileExisted, err := flock.New(dirtyPath)
	if err != nil {
		glog.Errorf("Could not lock %s, Prometheus already running?", dirtyPath)
		return nil, err
	}
	if dirtyfileExisted {
		dirty = true
	}

	archivedFingerprintToMetrics, err := index.NewFingerprintMetricIndex(basePath)
	if err != nil {
		return nil, err
	}
	archivedFingerprintToTimeRange, err := index.NewFingerprintTimeRangeIndex(basePath)
	if err != nil {
		return nil, err
	}

	p := &persistence{
		basePath: basePath,
		chunkLen: chunkLen,

		archivedFingerprintToMetrics:   archivedFingerprintToMetrics,
		archivedFingerprintToTimeRange: archivedFingerprintToTimeRange,

		indexingQueue:   make(chan indexingOp, indexingQueueCapacity),
		indexingStopped: make(chan struct{}),
		indexingFlush:   make(chan chan int),

		indexingQueueLength: prometheus.NewGauge(prometheus.GaugeOpts{
			Namespace: namespace,
			Subsystem: subsystem,
			Name:      "indexing_queue_length",
			Help:      "The number of metrics waiting to be indexed.",
		}),
		indexingQueueCapacity: prometheus.MustNewConstMetric(
			prometheus.NewDesc(
				prometheus.BuildFQName(namespace, subsystem, "indexing_queue_capacity"),
				"The capacity of the indexing queue.",
				nil, nil,
			),
			prometheus.GaugeValue,
			float64(indexingQueueCapacity),
		),
		indexingBatchSizes: prometheus.NewSummary(
			prometheus.SummaryOpts{
				Namespace: namespace,
				Subsystem: subsystem,
				Name:      "indexing_batch_sizes",
				Help:      "Quantiles for indexing batch sizes (number of metrics per batch).",
			},
		),
		indexingBatchLatency: prometheus.NewSummary(
			prometheus.SummaryOpts{
				Namespace: namespace,
				Subsystem: subsystem,
				Name:      "indexing_batch_latency_milliseconds",
				Help:      "Quantiles for batch indexing latencies in milliseconds.",
			},
		),
		checkpointDuration: prometheus.NewGauge(prometheus.GaugeOpts{
			Namespace: namespace,
			Subsystem: subsystem,
			Name:      "checkpoint_duration_milliseconds",
			Help:      "The duration (in milliseconds) it took to checkpoint in-memory metrics and head chunks.",
		}),
		dirty:         dirty,
		dirtyFileName: dirtyPath,
		fLock:         fLock,
	}

	if p.dirty {
		// Blow away the label indexes. We'll rebuild them later.
		if err := index.DeleteLabelPairFingerprintIndex(basePath); err != nil {
			return nil, err
		}
		if err := index.DeleteLabelNameLabelValuesIndex(basePath); err != nil {
			return nil, err
		}
	}
	labelPairToFingerprints, err := index.NewLabelPairFingerprintIndex(basePath)
	if err != nil {
		return nil, err
	}
	labelNameToLabelValues, err := index.NewLabelNameLabelValuesIndex(basePath)
	if err != nil {
		return nil, err
	}
	p.labelPairToFingerprints = labelPairToFingerprints
	p.labelNameToLabelValues = labelNameToLabelValues

	go p.processIndexingQueue()
	return p, nil
}

// Describe implements prometheus.Collector.
func (p *persistence) Describe(ch chan<- *prometheus.Desc) {
	ch <- p.indexingQueueLength.Desc()
	ch <- p.indexingQueueCapacity.Desc()
	p.indexingBatchSizes.Describe(ch)
	p.indexingBatchLatency.Describe(ch)
	ch <- p.checkpointDuration.Desc()
}

// Collect implements prometheus.Collector.
func (p *persistence) Collect(ch chan<- prometheus.Metric) {
	p.indexingQueueLength.Set(float64(len(p.indexingQueue)))

	ch <- p.indexingQueueLength
	ch <- p.indexingQueueCapacity
	p.indexingBatchSizes.Collect(ch)
	p.indexingBatchLatency.Collect(ch)
	ch <- p.checkpointDuration
}

// isDirty returns the dirty flag in a goroutine-safe way.
func (p *persistence) isDirty() bool {
	p.dirtyMtx.Lock()
	defer p.dirtyMtx.Unlock()
	return p.dirty
}

// setDirty sets the dirty flag in a goroutine-safe way. Once the dirty flag was
// set to true with this method, it cannot be set to false again. (If we became
// dirty during our runtime, there is no way back. If we were dirty from the
// start, a clean-up might make us clean again.)
func (p *persistence) setDirty(dirty bool) {
	p.dirtyMtx.Lock()
	defer p.dirtyMtx.Unlock()
	if p.becameDirty {
		return
	}
	p.dirty = dirty
	if dirty {
		p.becameDirty = true
		glog.Error("The storage is now inconsistent. Restart Prometheus ASAP to initiate recovery.")
	}
}

// recoverFromCrash is called by loadSeriesMapAndHeads if the persistence
// appears to be dirty after the loading (either because the loading resulted in
// an error or because the persistence was dirty from the start). Not goroutine
// safe. Only call before anything else is running (except index processing
// queue as started by newPersistence).
func (p *persistence) recoverFromCrash(fingerprintToSeries map[clientmodel.Fingerprint]*memorySeries) error {
	// TODO(beorn): We need proper tests for the crash recovery.
	glog.Warning("Starting crash recovery. Prometheus is inoperative until complete.")

	fpsSeen := map[clientmodel.Fingerprint]struct{}{}
	count := 0
	seriesDirNameFmt := fmt.Sprintf("%%0%dx", seriesDirNameLen)

	glog.Info("Scanning files.")
	for i := 0; i < 1<<(seriesDirNameLen*4); i++ {
		dirname := path.Join(p.basePath, fmt.Sprintf(seriesDirNameFmt, i))
		dir, err := os.Open(dirname)
		if os.IsNotExist(err) {
			continue
		}
		if err != nil {
			return err
		}
		defer dir.Close()
		for fis := []os.FileInfo{}; err != io.EOF; fis, err = dir.Readdir(1024) {
			if err != nil {
				return err
			}
			for _, fi := range fis {
				fp, ok := p.sanitizeSeries(dirname, fi, fingerprintToSeries)
				if ok {
					fpsSeen[fp] = struct{}{}
				}
				count++
				if count%10000 == 0 {
					glog.Infof("%d files scanned.", count)
				}
			}
		}
	}
	glog.Infof("File scan complete. %d series found.", len(fpsSeen))

	glog.Info("Checking for series without series file.")
	for fp, s := range fingerprintToSeries {
		if _, seen := fpsSeen[fp]; !seen {
			// fp exists in fingerprintToSeries, but has no representation on disk.
			if s.headChunkPersisted {
				// Oops, head chunk was persisted, but nothing on disk.
				// Thus, we lost that series completely. Clean up the remnants.
				delete(fingerprintToSeries, fp)
				if err := p.dropArchivedMetric(fp); err != nil {
					// Dropping the archived metric didn't work, so try
					// to unindex it, just in case it's in the indexes.
					p.unindexMetric(fp, s.metric)
				}
				glog.Warningf("Lost series detected: fingerprint %v, metric %v.", fp, s.metric)
				continue
			}
			// If we are here, the only chunk we have is the head chunk.
			// Adjust things accordingly.
			if len(s.chunkDescs) > 1 || s.chunkDescsOffset != 0 {
				minLostChunks := len(s.chunkDescs) + s.chunkDescsOffset - 1
				if minLostChunks <= 0 {
					glog.Warningf(
						"Possible loss of chunks for fingerprint %v, metric %v.",
						fp, s.metric,
					)
				} else {
					glog.Warningf(
						"Lost at least %d chunks for fingerprint %v, metric %v.",
						minLostChunks, fp, s.metric,
					)
				}
				s.chunkDescs = s.chunkDescs[len(s.chunkDescs)-1:]
				s.chunkDescsOffset = 0
			}
			fpsSeen[fp] = struct{}{} // Add so that fpsSeen is complete.
		}
	}
	glog.Info("Check for series without series file complete.")

	if err := p.cleanUpArchiveIndexes(fingerprintToSeries, fpsSeen); err != nil {
		return err
	}
	if err := p.rebuildLabelIndexes(fingerprintToSeries); err != nil {
		return err
	}

	p.setDirty(false)
	glog.Warning("Crash recovery complete.")
	return nil
}

// sanitizeSeries sanitizes a series based on its series file as defined by the
// provided directory and FileInfo. The method returns the fingerprint as
// derived from the directory and file name, and whether the provided file has
// been sanitized. A file that failed to be sanitized is deleted, if possible.
//
// The following steps are performed:
//
// - A file whose name doesn't comply with the naming scheme of a series file is
//   simply deleted.
//
// - If the size of the series file isn't a multiple of the chunk size,
//   extraneous bytes are truncated. If the truncation fails, the file is
//   deleted instead.
//
// - A file that is empty (after truncation) is deleted.
//
// - A series that is not archived (i.e. it is in the fingerprintToSeries map)
//   is checked for consistency of its various parameters (like head-chunk
//   persistence state, offset of chunkDescs etc.). In particular, overlap
//   between an in-memory head chunk with the most recent persisted chunk is
//   checked. Inconsistencies are rectified.
//
// - A series that is archived (i.e. it is not in the fingerprintToSeries map)
//   is checked for its presence in the index of archived series. If it cannot
//   be found there, it is deleted.
func (p *persistence) sanitizeSeries(dirname string, fi os.FileInfo, fingerprintToSeries map[clientmodel.Fingerprint]*memorySeries) (clientmodel.Fingerprint, bool) {
	filename := path.Join(dirname, fi.Name())
	purge := func() {
		glog.Warningf("Deleting lost series file %s.", filename) // TODO: Move to lost+found directory?
		os.Remove(filename)
	}

	var fp clientmodel.Fingerprint
	if len(fi.Name()) != fpLen-seriesDirNameLen+len(seriesFileSuffix) ||
		!strings.HasSuffix(fi.Name(), seriesFileSuffix) {
		glog.Warningf("Unexpected series file name %s.", filename)
		purge()
		return fp, false
	}
	if err := fp.LoadFromString(path.Base(dirname) + fi.Name()[:fpLen-seriesDirNameLen]); err != nil {
		glog.Warningf("Error parsing file name %s: %s", filename, err)
		purge()
		return fp, false
	}

	bytesToTrim := fi.Size() % int64(p.chunkLen+chunkHeaderLen)
	chunksInFile := int(fi.Size()) / (p.chunkLen + chunkHeaderLen)
	if bytesToTrim != 0 {
		glog.Warningf(
			"Truncating file %s to exactly %d chunks, trimming %d extraneous bytes.",
			filename, chunksInFile, bytesToTrim,
		)
		f, err := os.OpenFile(filename, os.O_WRONLY, 0640)
		if err != nil {
			glog.Errorf("Could not open file %s: %s", filename, err)
			purge()
			return fp, false
		}
		if err := f.Truncate(fi.Size() - bytesToTrim); err != nil {
			glog.Errorf("Failed to truncate file %s: %s", filename, err)
			purge()
			return fp, false
		}
	}
	if chunksInFile == 0 {
		glog.Warningf("No chunks left in file %s.", filename)
		purge()
		return fp, false
	}

	s, ok := fingerprintToSeries[fp]
	if ok { // This series is supposed to not be archived.
		if s == nil {
			panic("fingerprint mapped to nil pointer")
		}
		if bytesToTrim == 0 && s.chunkDescsOffset != -1 &&
			((s.headChunkPersisted && chunksInFile == s.chunkDescsOffset+len(s.chunkDescs)) ||
				(!s.headChunkPersisted && chunksInFile == s.chunkDescsOffset+len(s.chunkDescs)-1)) {
			// Everything is consistent. We are good.
			return fp, true
		}
		// If we are here, something's fishy.
		if s.headChunkPersisted {
			// This is the easy case as we don't have a head chunk
			// in heads.db. Treat this series as a freshly
			// unarchived one. No chunks or chunkDescs in memory, no
			// current head chunk.
			glog.Warningf(
				"Treating recovered metric %v, fingerprint %v, as freshly unarchived, with %d chunks in series file.",
				s.metric, fp, chunksInFile,
			)
			s.chunkDescs = nil
			s.chunkDescsOffset = -1
			return fp, true
		}
		// This is the tricky one: We have a head chunk from heads.db,
		// but the very same head chunk might already be in the series
		// file. Strategy: Check the first time of both. If it is the
		// same or newer, assume the latest chunk in the series file
		// is the most recent head chunk. If not, keep the head chunk
		// we got from heads.db.
		// First, assume the head chunk is not yet persisted.
		s.chunkDescs = s.chunkDescs[len(s.chunkDescs)-1:]
		s.chunkDescsOffset = -1
		// Load all the chunk descs (which assumes we have none from the future).
		cds, err := p.loadChunkDescs(fp, clientmodel.Now())
		if err != nil {
			glog.Errorf(
				"Failed to load chunk descriptors for metric %v, fingerprint %v: %s",
				s.metric, fp, err,
			)
			purge()
			return fp, false
		}
		if cds[len(cds)-1].firstTime().Before(s.head().firstTime()) {
			s.chunkDescs = append(cds, s.chunkDescs...)
			glog.Warningf(
				"Recovered metric %v, fingerprint %v: recovered %d chunks from series file, recovered head chunk from checkpoint.",
				s.metric, fp, chunksInFile,
			)
		} else {
			glog.Warningf(
				"Recovered metric %v, fingerprint %v: head chunk found among the %d recovered chunks in series file.",
				s.metric, fp, chunksInFile,
			)
			s.chunkDescs = cds
			s.headChunkPersisted = true
		}
		s.chunkDescsOffset = 0
		return fp, true
	}
	// This series is supposed to be archived.
	metric, err := p.getArchivedMetric(fp)
	if err != nil {
		glog.Errorf(
			"Fingerprint %v assumed archived but couldn't be looked up in archived index: %s",
			fp, err,
		)
		purge()
		return fp, false
	}
	if metric == nil {
		glog.Warningf(
			"Fingerprint %v assumed archived but couldn't be found in archived index.",
			fp,
		)
		purge()
		return fp, false
	}
	// This series looks like a properly archived one.
	return fp, true
}

func (p *persistence) cleanUpArchiveIndexes(
	fpToSeries map[clientmodel.Fingerprint]*memorySeries,
	fpsSeen map[clientmodel.Fingerprint]struct{},
) error {
	glog.Info("Cleaning up archive indexes.")
	var fp codable.Fingerprint
	var m codable.Metric
	count := 0
	if err := p.archivedFingerprintToMetrics.ForEach(func(kv index.KeyValueAccessor) error {
		count++
		if count%10000 == 0 {
			glog.Infof("%d archived metrics checked.", count)
		}
		if err := kv.Key(&fp); err != nil {
			return err
		}
		_, fpSeen := fpsSeen[clientmodel.Fingerprint(fp)]
		inMemory := false
		if fpSeen {
			_, inMemory = fpToSeries[clientmodel.Fingerprint(fp)]
		}
		if !fpSeen || inMemory {
			if inMemory {
				glog.Warningf("Archive clean-up: Fingerprint %v is not archived. Purging from archive indexes.", clientmodel.Fingerprint(fp))
			}
			if !fpSeen {
				glog.Warningf("Archive clean-up: Fingerprint %v is unknown. Purging from archive indexes.", clientmodel.Fingerprint(fp))
			}
			if err := p.archivedFingerprintToMetrics.Delete(fp); err != nil {
				return err
			}
			// Delete from timerange index, too.
			p.archivedFingerprintToTimeRange.Delete(fp)
			// TODO: Ignoring errors here as fp might not be in
			// timerange index (which is good) but which would
			// return an error. Delete signature could be changed
			// like the Get signature to detect a real error.
			return nil
		}
		// fp is legitimately archived. Make sure it is in timerange index, too.
		has, err := p.archivedFingerprintToTimeRange.Has(fp)
		if err != nil {
			return err
		}
		if has {
			return nil // All good.
		}
		glog.Warningf("Archive clean-up: Fingerprint %v is not in time-range index. Unarchiving it for recovery.", fp)
		if err := p.archivedFingerprintToMetrics.Delete(fp); err != nil {
			return err
		}
		if err := kv.Value(&m); err != nil {
			return err
		}
		series := newMemorySeries(clientmodel.Metric(m), false, math.MinInt64)
		cds, err := p.loadChunkDescs(clientmodel.Fingerprint(fp), clientmodel.Now())
		if err != nil {
			return err
		}
		series.chunkDescs = cds
		series.chunkDescsOffset = 0
		fpToSeries[clientmodel.Fingerprint(fp)] = series
		return nil
	}); err != nil {
		return err
	}
	count = 0
	if err := p.archivedFingerprintToTimeRange.ForEach(func(kv index.KeyValueAccessor) error {
		count++
		if count%10000 == 0 {
			glog.Infof("%d archived time ranges checked.", count)
		}
		if err := kv.Key(&fp); err != nil {
			return err
		}
		has, err := p.archivedFingerprintToMetrics.Has(fp)
		if err != nil {
			return err
		}
		if has {
			return nil // All good.
		}
		glog.Warningf("Archive clean-up: Purging unknown fingerprint %v in time-range index.", fp)
		if err := p.archivedFingerprintToTimeRange.Delete(fp); err != nil {
			return err
		}
		return nil
	}); err != nil {
		return err
	}
	glog.Info("Clean-up of archive indexes complete.")
	return nil
}

func (p *persistence) rebuildLabelIndexes(
	fpToSeries map[clientmodel.Fingerprint]*memorySeries,
) error {
	count := 0
	glog.Info("Rebuilding label indexes.")
	glog.Info("Indexing metrics in memory.")
	for fp, s := range fpToSeries {
		p.indexMetric(fp, s.metric)
		count++
		if count%10000 == 0 {
			glog.Infof("%d metrics queued for indexing.", count)
		}
	}
	glog.Info("Indexing archived metrics.")
	var fp codable.Fingerprint
	var m codable.Metric
	if err := p.archivedFingerprintToMetrics.ForEach(func(kv index.KeyValueAccessor) error {
		if err := kv.Key(&fp); err != nil {
			return err
		}
		if err := kv.Value(&m); err != nil {
			return err
		}
		p.indexMetric(clientmodel.Fingerprint(fp), clientmodel.Metric(m))
		count++
		if count%10000 == 0 {
			glog.Infof("%d metrics queued for indexing.", count)
		}
		return nil
	}); err != nil {
		return err
	}
	glog.Info("All requests for rebuilding the label indexes queued. (Actual processing may lag behind.)")
	return nil
}

// getFingerprintsForLabelPair returns the fingerprints for the given label
// pair. This method is goroutine-safe, but take into account that metrics queued
// for indexing with indexMetric might not have made it into the index
// yet. (Same applies correspondingly to unindexMetric.)
func (p *persistence) getFingerprintsForLabelPair(lp metric.LabelPair) (clientmodel.Fingerprints, error) {
	fps, _, err := p.labelPairToFingerprints.Lookup(lp)
	if err != nil {
		return nil, err
	}
	return fps, nil
}

// getLabelValuesForLabelName returns the label values for the given label
// name. This method is goroutine-safe, but take into account that metrics queued
// for indexing with indexMetric might not have made it into the index
// yet. (Same applies correspondingly to unindexMetric.)
func (p *persistence) getLabelValuesForLabelName(ln clientmodel.LabelName) (clientmodel.LabelValues, error) {
	lvs, _, err := p.labelNameToLabelValues.Lookup(ln)
	if err != nil {
		return nil, err
	}
	return lvs, nil
}

// persistChunk persists a single chunk of a series. It is the caller's
// responsibility to not modify the chunk concurrently and to not persist or
// drop anything for the same fingerprint concurrently. It returns the
// (zero-based) index of the persisted chunk within the series file. In case of
// an error, the returned index is -1 (to avoid the misconception that the chunk
// was written at position 0).
func (p *persistence) persistChunk(fp clientmodel.Fingerprint, c chunk) (int, error) {
	// 1. Open chunk file.
	f, err := p.openChunkFileForWriting(fp)
	if err != nil {
		return -1, err
	}
	defer f.Close()

	b := bufio.NewWriterSize(f, chunkHeaderLen+p.chunkLen)

	// 2. Write the header (chunk type and first/last times).
	err = writeChunkHeader(b, c)
	if err != nil {
		return -1, err
	}

	// 3. Write chunk into file.
	err = c.marshal(b)
	if err != nil {
		return -1, err
	}

	// 4. Determine index within the file.
	b.Flush()
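	// The chunk was appended at the end of the file, so the file offset after
	// the flush marks the end of the just-written entry. Assuming
	// chunkIndexForOffset (defined elsewhere in this package) divides the
	// offset by the full entry size (chunkHeaderLen+chunkLen), the new chunk
	// is the last entry, i.e. that index minus one. For example, with a
	// hypothetical chunkLen of 1024, an offset of 2082 bytes means two
	// complete 1041-byte entries, so the just-written chunk has index 1.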
	offset, err := f.Seek(0, os.SEEK_CUR)
	if err != nil {
		return -1, err
	}
	index, err := p.chunkIndexForOffset(offset)
	if err != nil {
		return -1, err
	}

	return index - 1, err
}

// loadChunks loads a group of chunks of a timeseries by their index. The chunk
// with the earliest time will have index 0, the following ones will have
// incrementally larger indexes. The indexOffset denotes the offset to be added to
// each index in indexes. It is the caller's responsibility to not persist or
// drop anything for the same fingerprint concurrently.
func (p *persistence) loadChunks(fp clientmodel.Fingerprint, indexes []int, indexOffset int) ([]chunk, error) {
	f, err := p.openChunkFileForReading(fp)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	chunks := make([]chunk, 0, len(indexes))
	typeBuf := make([]byte, 1)
	for _, idx := range indexes {
		_, err := f.Seek(p.offsetForChunkIndex(idx+indexOffset), os.SEEK_SET)
		if err != nil {
			return nil, err
		}

		n, err := f.Read(typeBuf)
		if err != nil {
			return nil, err
		}
		if n != 1 {
			panic("read returned != 1 bytes")
		}

		_, err = f.Seek(chunkHeaderLen-1, os.SEEK_CUR)
		if err != nil {
			return nil, err
		}
		chunk := chunkForType(typeBuf[0])
		chunk.unmarshal(f)
		chunks = append(chunks, chunk)
	}
	return chunks, nil
}

// loadChunkDescs loads chunkDescs for a series up until a given time. It is
// the caller's responsibility to not persist or drop anything for the same
// fingerprint concurrently.
func (p *persistence) loadChunkDescs(fp clientmodel.Fingerprint, beforeTime clientmodel.Timestamp) ([]*chunkDesc, error) {
	f, err := p.openChunkFileForReading(fp)
	if os.IsNotExist(err) {
		return nil, nil
	}
	if err != nil {
		return nil, err
	}
	defer f.Close()

	fi, err := f.Stat()
	if err != nil {
		return nil, err
	}
	totalChunkLen := chunkHeaderLen + p.chunkLen
	if fi.Size()%int64(totalChunkLen) != 0 {
		p.setDirty(true)
		return nil, fmt.Errorf(
			"size of series file for fingerprint %v is %d, which is not a multiple of the chunk length %d",
			fp, fi.Size(), totalChunkLen,
		)
	}

	numChunks := int(fi.Size()) / totalChunkLen
	cds := make([]*chunkDesc, 0, numChunks)
	for i := 0; i < numChunks; i++ {
		_, err := f.Seek(p.offsetForChunkIndex(i)+chunkHeaderFirstTimeOffset, os.SEEK_SET)
		if err != nil {
			return nil, err
		}

		chunkTimesBuf := make([]byte, 16)
		_, err = io.ReadAtLeast(f, chunkTimesBuf, 16)
		if err != nil {
			return nil, err
		}
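		// Only the first/last sample times are materialized here; the chunk
		// data itself stays on disk and is loaded on demand via loadChunks.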
		cd := &chunkDesc{
			chunkFirstTime: clientmodel.Timestamp(binary.LittleEndian.Uint64(chunkTimesBuf)),
			chunkLastTime:  clientmodel.Timestamp(binary.LittleEndian.Uint64(chunkTimesBuf[8:])),
		}
		if !cd.chunkLastTime.Before(beforeTime) {
			// From here on, we have chunkDescs in memory already.
			break
		}
		cds = append(cds, cd)
	}
	chunkDescOps.WithLabelValues(load).Add(float64(len(cds)))
	numMemChunkDescs.Add(float64(len(cds)))
	return cds, nil
}

// checkpointSeriesMapAndHeads persists the fingerprint to memory-series mapping
// and all open (non-full) head chunks. Do not call concurrently with
// loadSeriesMapAndHeads.
//
// Description of the file format:
//
// (1) Magic string (const headsMagicString).
//
// (2) Varint-encoded format version (const headsFormatVersion).
//
// (3) Number of series in checkpoint as big-endian uint64.
//
// (4) Repeated once per series:
//
// (4.1) A flag byte, see flag constants above.
//
// (4.2) The fingerprint as big-endian uint64.
//
// (4.3) The metric as defined by codable.Metric.
//
// (4.4) The varint-encoded chunkDescsOffset.
//
// (4.5) The varint-encoded savedFirstTime.
//
// (4.6) The varint-encoded number of chunk descriptors.
//
// (4.7) Repeated once per chunk descriptor, oldest to most recent:
//
// (4.7.1) The varint-encoded first time.
//
// (4.7.2) The varint-encoded last time.
//
// (4.8) Exception to 4.7: If the most recent chunk is a non-persisted head chunk,
// the following is persisted instead of the most recent chunk descriptor:
//
// (4.8.1) A byte defining the chunk type.
//
// (4.8.2) The head chunk itself, marshaled with the marshal() method.
//
func (p *persistence) checkpointSeriesMapAndHeads(fingerprintToSeries *seriesMap, fpLocker *fingerprintLocker) (err error) {
	glog.Info("Checkpointing in-memory metrics and head chunks...")
	begin := time.Now()
	f, err := os.OpenFile(p.headsTempFileName(), os.O_WRONLY|os.O_TRUNC|os.O_CREATE, 0640)
	if err != nil {
		return
	}

	defer func() {
		closeErr := f.Close()
		if err != nil {
			return
		}
		err = closeErr
		if err != nil {
			return
		}
		err = os.Rename(p.headsTempFileName(), p.headsFileName())
		duration := time.Since(begin)
		p.checkpointDuration.Set(float64(duration) / float64(time.Millisecond))
		glog.Infof("Done checkpointing in-memory metrics and head chunks in %v.", duration)
	}()

	w := bufio.NewWriterSize(f, fileBufSize)

	if _, err = w.WriteString(headsMagicString); err != nil {
		return
	}
	var numberOfSeriesOffset int
	if numberOfSeriesOffset, err = codable.EncodeVarint(w, headsFormatVersion); err != nil {
		return
	}
	numberOfSeriesOffset += len(headsMagicString)
	numberOfSeriesInHeader := uint64(fingerprintToSeries.length())
	// We have to write the number of series as uint64 because we might need
	// to overwrite it later, and a varint might change byte width then.
	if err = codable.EncodeUint64(w, numberOfSeriesInHeader); err != nil {
		return
	}

	iter := fingerprintToSeries.iter()
	defer func() {
		// Consume the iterator in any case to not leak goroutines.
		for range iter {
		}
	}()

	var realNumberOfSeries uint64
	for m := range iter {
		func() { // Wrapped in function to use defer for unlocking the fp.
			fpLocker.Lock(m.fp)
			defer fpLocker.Unlock(m.fp)

			if len(m.series.chunkDescs) == 0 {
				// This series was completely purged or archived in the meantime. Ignore.
				return
			}
			realNumberOfSeries++
			var seriesFlags byte
			if m.series.headChunkPersisted {
				seriesFlags |= flagHeadChunkPersisted
			}
			if err = w.WriteByte(seriesFlags); err != nil {
				return
			}
			if err = codable.EncodeUint64(w, uint64(m.fp)); err != nil {
				return
			}
			var buf []byte
			buf, err = codable.Metric(m.series.metric).MarshalBinary()
			if err != nil {
				return
			}
			w.Write(buf)
			if _, err = codable.EncodeVarint(w, int64(m.series.chunkDescsOffset)); err != nil {
				return
			}
			if _, err = codable.EncodeVarint(w, int64(m.series.savedFirstTime)); err != nil {
				return
			}
			if _, err = codable.EncodeVarint(w, int64(len(m.series.chunkDescs))); err != nil {
				return
			}
			for i, chunkDesc := range m.series.chunkDescs {
				if m.series.headChunkPersisted || i < len(m.series.chunkDescs)-1 {
					if _, err = codable.EncodeVarint(w, int64(chunkDesc.firstTime())); err != nil {
						return
					}
					if _, err = codable.EncodeVarint(w, int64(chunkDesc.lastTime())); err != nil {
						return
					}
				} else {
					// This is the non-persisted head chunk. Fully marshal it.
					if err = w.WriteByte(chunkType(chunkDesc.chunk)); err != nil {
						return
					}
					if err = chunkDesc.chunk.marshal(w); err != nil {
						return
					}
				}
			}
		}()
		if err != nil {
			return
		}
	}
	if err = w.Flush(); err != nil {
		return
	}
	if realNumberOfSeries != numberOfSeriesInHeader {
		// The number of series has changed in the meantime.
		// Rewrite it in the header.
		if _, err = f.Seek(int64(numberOfSeriesOffset), os.SEEK_SET); err != nil {
			return
		}
		if err = codable.EncodeUint64(f, realNumberOfSeries); err != nil {
			return
		}
	}
	return
}

// loadSeriesMapAndHeads loads the fingerprint to memory-series mapping and all
// open (non-full) head chunks. If recoverable corruption is detected, or if the
// dirty flag was set from the beginning, crash recovery is run, which might
// take a while. If an unrecoverable error is encountered, it is returned. Call
// this method during start-up while nothing else is running in storage
// land. This method is utterly goroutine-unsafe.
func (p *persistence) loadSeriesMapAndHeads() (sm *seriesMap, err error) {
	var chunksTotal, chunkDescsTotal int64
	fingerprintToSeries := make(map[clientmodel.Fingerprint]*memorySeries)
	sm = &seriesMap{m: fingerprintToSeries}

	defer func() {
		if sm != nil && p.dirty {
			glog.Warning("Persistence layer appears dirty.")
			err = p.recoverFromCrash(fingerprintToSeries)
			if err != nil {
				sm = nil
			}
		}
		if err == nil {
			atomic.AddInt64(&numMemChunks, chunksTotal)
			numMemChunkDescs.Add(float64(chunkDescsTotal))
		}
	}()

	f, err := os.Open(p.headsFileName())
	if os.IsNotExist(err) {
		return sm, nil
	}
	if err != nil {
		glog.Warning("Could not open heads file:", err)
		p.dirty = true
		return
	}
	defer f.Close()
	r := bufio.NewReaderSize(f, fileBufSize)

	buf := make([]byte, len(headsMagicString))
	if _, err := io.ReadFull(r, buf); err != nil {
		glog.Warning("Could not read from heads file:", err)
		p.dirty = true
		return sm, nil
	}
	magic := string(buf)
	if magic != headsMagicString {
		glog.Warningf(
			"unexpected magic string, want %q, got %q",
			headsMagicString, magic,
		)
		p.dirty = true
		return
	}
	if version, err := binary.ReadVarint(r); version != headsFormatVersion || err != nil {
		glog.Warningf("unknown heads format version, want %d", headsFormatVersion)
		p.dirty = true
		return sm, nil
	}
	numSeries, err := codable.DecodeUint64(r)
	if err != nil {
		glog.Warning("Could not decode number of series:", err)
		p.dirty = true
		return sm, nil
	}

	for ; numSeries > 0; numSeries-- {
		seriesFlags, err := r.ReadByte()
		if err != nil {
			glog.Warning("Could not read series flags:", err)
			p.dirty = true
			return sm, nil
		}
		headChunkPersisted := seriesFlags&flagHeadChunkPersisted != 0
		fp, err := codable.DecodeUint64(r)
		if err != nil {
			glog.Warning("Could not decode fingerprint:", err)
			p.dirty = true
			return sm, nil
		}
		var metric codable.Metric
		if err := metric.UnmarshalFromReader(r); err != nil {
			glog.Warning("Could not decode metric:", err)
			p.dirty = true
			return sm, nil
		}
		chunkDescsOffset, err := binary.ReadVarint(r)
		if err != nil {
			glog.Warning("Could not decode chunk descriptor offset:", err)
			p.dirty = true
			return sm, nil
		}
		savedFirstTime, err := binary.ReadVarint(r)
		if err != nil {
			glog.Warning("Could not decode saved first time:", err)
			p.dirty = true
			return sm, nil
		}
		numChunkDescs, err := binary.ReadVarint(r)
		if err != nil {
			glog.Warning("Could not decode number of chunk descriptors:", err)
			p.dirty = true
			return sm, nil
		}
		chunkDescs := make([]*chunkDesc, numChunkDescs)
		chunkDescsTotal += numChunkDescs

		for i := int64(0); i < numChunkDescs; i++ {
			if headChunkPersisted || i < numChunkDescs-1 {
				firstTime, err := binary.ReadVarint(r)
				if err != nil {
					glog.Warning("Could not decode first time:", err)
					p.dirty = true
					return sm, nil
				}
				lastTime, err := binary.ReadVarint(r)
				if err != nil {
					glog.Warning("Could not decode last time:", err)
					p.dirty = true
					return sm, nil
				}
				chunkDescs[i] = &chunkDesc{
					chunkFirstTime: clientmodel.Timestamp(firstTime),
					chunkLastTime:  clientmodel.Timestamp(lastTime),
				}
			} else {
				// Non-persisted head chunk.
				chunksTotal++
				chunkType, err := r.ReadByte()
				if err != nil {
					glog.Warning("Could not decode chunk type:", err)
					p.dirty = true
					return sm, nil
				}
				chunk := chunkForType(chunkType)
				if err := chunk.unmarshal(r); err != nil {
					glog.Warning("Could not decode chunk:", err)
					p.dirty = true
					return sm, nil
				}
				chunkDescs[i] = newChunkDesc(chunk)
			}
		}

		fingerprintToSeries[clientmodel.Fingerprint(fp)] = &memorySeries{
			metric:             clientmodel.Metric(metric),
			chunkDescs:         chunkDescs,
			chunkDescsOffset:   int(chunkDescsOffset),
			savedFirstTime:     clientmodel.Timestamp(savedFirstTime),
			headChunkPersisted: headChunkPersisted,
		}
	}
	return sm, nil
}

// dropChunks deletes all chunks from a series whose last sample time is before
// beforeTime. It returns the timestamp of the first sample in the oldest chunk
// _not_ dropped, the number of deleted chunks, and true if all chunks of the
// series have been deleted (in which case the returned timestamp will be 0 and
// must be ignored). It is the caller's responsibility to make sure nothing is
// persisted or loaded for the same fingerprint concurrently.
func (p *persistence) dropChunks(fp clientmodel.Fingerprint, beforeTime clientmodel.Timestamp) (
	firstTimeNotDropped clientmodel.Timestamp,
	numDropped int,
	allDropped bool,
	err error,
) {
	defer func() {
		if err != nil {
			p.setDirty(true)
		}
	}()
	f, err := p.openChunkFileForReading(fp)
	if os.IsNotExist(err) {
		return 0, 0, true, nil
	}
	if err != nil {
		return 0, 0, false, err
	}
	defer f.Close()

	// Find the first chunk that should be kept.
	var i int
	var firstTime clientmodel.Timestamp
	for ; ; i++ {
		_, err := f.Seek(p.offsetForChunkIndex(i)+chunkHeaderFirstTimeOffset, os.SEEK_SET)
		if err != nil {
			return 0, 0, false, err
		}
		timeBuf := make([]byte, 16)
		_, err = io.ReadAtLeast(f, timeBuf, 16)
		if err == io.EOF {
			// We ran into the end of the file without finding any chunks that should
			// be kept. Remove the whole file.
			chunkOps.WithLabelValues(purge).Add(float64(i))
			if err := os.Remove(f.Name()); err != nil {
				return 0, 0, true, err
			}
			return 0, i, true, nil
		}
		if err != nil {
			return 0, 0, false, err
		}
		lastTime := clientmodel.Timestamp(binary.LittleEndian.Uint64(timeBuf[8:]))
		if !lastTime.Before(beforeTime) {
			firstTime = clientmodel.Timestamp(binary.LittleEndian.Uint64(timeBuf))
			chunkOps.WithLabelValues(purge).Add(float64(i))
			break
		}
	}

	// We've found the first chunk that should be kept. Seek backwards to the
	// beginning of its header and start copying everything from there into a new
	// file.
	_, err = f.Seek(-(chunkHeaderFirstTimeOffset + 16), os.SEEK_CUR)
	if err != nil {
		return 0, 0, false, err
	}

	temp, err := os.OpenFile(p.tempFileNameForFingerprint(fp), os.O_WRONLY|os.O_CREATE, 0640)
	if err != nil {
		return 0, 0, false, err
	}
	defer temp.Close()

	if _, err := io.Copy(temp, f); err != nil {
		return 0, 0, false, err
	}

	if err := os.Rename(p.tempFileNameForFingerprint(fp), p.fileNameForFingerprint(fp)); err != nil {
		return 0, 0, false, err
	}
	return firstTime, i, false, nil
}
|
|
|
|
|
2014-10-07 10:11:24 -07:00
|
|
|
// indexMetric queues the given metric for addition to the indexes needed by
// getFingerprintsForLabelPair, getLabelValuesForLabelName, and
// getFingerprintsModifiedBefore. If the queue is full, this method blocks
// until the metric can be queued. This method is goroutine-safe.
func (p *persistence) indexMetric(fp clientmodel.Fingerprint, m clientmodel.Metric) {
	p.indexingQueue <- indexingOp{fp, m, add}
}

// unindexMetric queues references to the given metric for removal from the
// indexes used for getFingerprintsForLabelPair, getLabelValuesForLabelName, and
// getFingerprintsModifiedBefore. The index of fingerprints to archived metrics
// is not affected by this removal. (In fact, never call this method for an
// archived metric. To drop an archived metric, call dropArchivedMetric.)
// If the queue is full, this method blocks until the metric can be queued. This
// method is goroutine-safe.
func (p *persistence) unindexMetric(fp clientmodel.Fingerprint, m clientmodel.Metric) {
	p.indexingQueue <- indexingOp{fp, m, remove}
}

// waitForIndexing waits until all items in the indexing queue are processed. If
// queue processing is currently on hold (to gather more ops for batching), this
// method will trigger an immediate start of processing. This method is
// goroutine-safe.
func (p *persistence) waitForIndexing() {
	wait := make(chan int)
	for {
		p.indexingFlush <- wait
		if <-wait == 0 {
			break
		}
	}
}

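// A typical caller (for example a test that needs the indexes to be current)
// would queue index operations and then block until they have been committed,
// roughly like this sketch:
//
//	p.indexMetric(fp, m)
//	p.waitForIndexing()
//	// The label indexes now reflect m and can be queried.
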
// archiveMetric persists the mapping of the given fingerprint to the given
// metric, together with the first and last timestamp of the series belonging to
// the metric. The caller must have locked the fingerprint.
func (p *persistence) archiveMetric(
	fp clientmodel.Fingerprint, m clientmodel.Metric, first, last clientmodel.Timestamp,
) error {
	if err := p.archivedFingerprintToMetrics.Put(codable.Fingerprint(fp), codable.Metric(m)); err != nil {
		p.setDirty(true)
		return err
	}
	if err := p.archivedFingerprintToTimeRange.Put(codable.Fingerprint(fp), codable.TimeRange{First: first, Last: last}); err != nil {
		p.setDirty(true)
		return err
	}
	return nil
}

// hasArchivedMetric returns whether the archived metric for the given
// fingerprint exists and if yes, what the first and last timestamp in the
// corresponding series is. This method is goroutine-safe.
func (p *persistence) hasArchivedMetric(fp clientmodel.Fingerprint) (
	hasMetric bool, firstTime, lastTime clientmodel.Timestamp, err error,
) {
	firstTime, lastTime, hasMetric, err = p.archivedFingerprintToTimeRange.Lookup(fp)
	return
}

// updateArchivedTimeRange updates an archived time range. The caller must make
// sure that the fingerprint is currently archived (the time range will
// otherwise be added without the corresponding metric in the archive).
func (p *persistence) updateArchivedTimeRange(
	fp clientmodel.Fingerprint, first, last clientmodel.Timestamp,
) error {
	return p.archivedFingerprintToTimeRange.Put(codable.Fingerprint(fp), codable.TimeRange{First: first, Last: last})
}

// getFingerprintsModifiedBefore returns the fingerprints of archived timeseries
// that have live samples before the provided timestamp. This method is
// goroutine-safe.
func (p *persistence) getFingerprintsModifiedBefore(beforeTime clientmodel.Timestamp) ([]clientmodel.Fingerprint, error) {
	var fp codable.Fingerprint
	var tr codable.TimeRange
	fps := []clientmodel.Fingerprint{}
	p.archivedFingerprintToTimeRange.ForEach(func(kv index.KeyValueAccessor) error {
		if err := kv.Value(&tr); err != nil {
			return err
		}
		if tr.First.Before(beforeTime) {
			if err := kv.Key(&fp); err != nil {
				return err
			}
			fps = append(fps, clientmodel.Fingerprint(fp))
		}
		return nil
	})
	return fps, nil
}

// getArchivedMetric retrieves the archived metric with the given
// fingerprint. This method is goroutine-safe.
func (p *persistence) getArchivedMetric(fp clientmodel.Fingerprint) (clientmodel.Metric, error) {
	metric, _, err := p.archivedFingerprintToMetrics.Lookup(fp)
	return metric, err
}

// dropArchivedMetric deletes an archived fingerprint and its corresponding
// metric entirely. It also queues the metric for un-indexing (no need to call
// unindexMetric for the deleted metric). The caller must have locked the
// fingerprint.
func (p *persistence) dropArchivedMetric(fp clientmodel.Fingerprint) (err error) {
	defer func() {
		if err != nil {
			p.setDirty(true)
		}
	}()

	metric, err := p.getArchivedMetric(fp)
	if err != nil || metric == nil {
		return err
	}
	if err := p.archivedFingerprintToMetrics.Delete(codable.Fingerprint(fp)); err != nil {
		return err
	}
	if err := p.archivedFingerprintToTimeRange.Delete(codable.Fingerprint(fp)); err != nil {
		return err
	}
	p.unindexMetric(fp, metric)
	return nil
}

// unarchiveMetric deletes an archived fingerprint and its metric, but (in
// contrast to dropArchivedMetric) does not un-index the metric. If a metric
// was actually deleted, the method returns true and the first time of the
// deleted metric. The caller must have locked the fingerprint.
func (p *persistence) unarchiveMetric(fp clientmodel.Fingerprint) (
	deletedAnything bool,
	firstDeletedTime clientmodel.Timestamp,
	err error,
) {
	defer func() {
		if err != nil {
			p.setDirty(true)
		}
	}()

	firstTime, _, has, err := p.archivedFingerprintToTimeRange.Lookup(fp)
	if err != nil || !has {
		return false, firstTime, err
	}
	if err := p.archivedFingerprintToMetrics.Delete(codable.Fingerprint(fp)); err != nil {
		return false, firstTime, err
	}
	if err := p.archivedFingerprintToTimeRange.Delete(codable.Fingerprint(fp)); err != nil {
		return false, firstTime, err
	}
	return true, firstTime, nil
}

// close flushes the indexing queue and other buffered data and releases any
// held resources. It also removes the dirty marker file if successful and if
// the persistence is currently not marked as dirty.
func (p *persistence) close() error {
	close(p.indexingQueue)
	<-p.indexingStopped

	var lastError, dirtyFileRemoveError error
	if err := p.archivedFingerprintToMetrics.Close(); err != nil {
		lastError = err
		glog.Error("Error closing archivedFingerprintToMetric index DB: ", err)
	}
	if err := p.archivedFingerprintToTimeRange.Close(); err != nil {
		lastError = err
		glog.Error("Error closing archivedFingerprintToTimeRange index DB: ", err)
	}
	if err := p.labelPairToFingerprints.Close(); err != nil {
		lastError = err
		glog.Error("Error closing labelPairToFingerprints index DB: ", err)
	}
	if err := p.labelNameToLabelValues.Close(); err != nil {
		lastError = err
		glog.Error("Error closing labelNameToLabelValues index DB: ", err)
	}
	if lastError == nil && !p.isDirty() {
		dirtyFileRemoveError = os.Remove(p.dirtyFileName)
	}
	if err := p.fLock.Release(); err != nil {
		lastError = err
		glog.Error("Error releasing file lock: ", err)
	}
	if dirtyFileRemoveError != nil {
		// On Windows, removing the dirty file before unlocking is not
		// possible. So remove it here if it failed above.
		lastError = os.Remove(p.dirtyFileName)
	}
	return lastError
}

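// dirNameForFingerprint returns the directory within the base path in which
// the series file for the given fingerprint is located. The directory name is
// made up of the first seriesDirNameLen characters of the fingerprint.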
func (p *persistence) dirNameForFingerprint(fp clientmodel.Fingerprint) string {
	fpStr := fp.String()
	return path.Join(p.basePath, fpStr[0:seriesDirNameLen])
}

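// fileNameForFingerprint returns the full path of the series file for the
// given fingerprint: the directory from dirNameForFingerprint, followed by the
// remaining fingerprint characters plus seriesFileSuffix.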
func (p *persistence) fileNameForFingerprint(fp clientmodel.Fingerprint) string {
	fpStr := fp.String()
	return path.Join(p.basePath, fpStr[0:seriesDirNameLen], fpStr[seriesDirNameLen:]+seriesFileSuffix)
}

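// tempFileNameForFingerprint returns the full path of the temporary file used
// while rewriting the series file for the given fingerprint (see dropChunks).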
func (p *persistence) tempFileNameForFingerprint(fp clientmodel.Fingerprint) string {
	fpStr := fp.String()
	return path.Join(p.basePath, fpStr[0:seriesDirNameLen], fpStr[seriesDirNameLen:]+seriesTempFileSuffix)
}

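// openChunkFileForWriting opens the series file for the given fingerprint for
// appending, creating the enclosing directory and the file itself if needed.
// It returns an error if the reported file offset is not a multiple of the
// on-disk chunk length, which indicates a corrupted series file.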
func (p *persistence) openChunkFileForWriting(fp clientmodel.Fingerprint) (*os.File, error) {
	if err := os.MkdirAll(p.dirNameForFingerprint(fp), 0700); err != nil {
		return nil, err
	}
	f, err := os.OpenFile(p.fileNameForFingerprint(fp), os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0640)
	if err != nil {
		return f, err
	}
	offset, err := f.Seek(0, os.SEEK_CUR)
	if offset%int64(chunkHeaderLen+p.chunkLen) != 0 {
		return f, fmt.Errorf(
			"size of series file for fingerprint %v is %d, which is not a multiple of the chunk length %d",
			fp, offset, chunkHeaderLen+p.chunkLen,
		)
	}
	return f, err
}

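// openChunkFileForReading opens the series file for the given fingerprint for
// reading.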
func (p *persistence) openChunkFileForReading(fp clientmodel.Fingerprint) (*os.File, error) {
	return os.Open(p.fileNameForFingerprint(fp))
}

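// writeChunkHeader writes the header for the given chunk to w: one byte for
// the chunk type, followed by the chunk's first and last timestamp, each
// encoded as a little-endian uint64.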
func writeChunkHeader(w io.Writer, c chunk) error {
	header := make([]byte, chunkHeaderLen)
	header[chunkHeaderTypeOffset] = chunkType(c)
	binary.LittleEndian.PutUint64(header[chunkHeaderFirstTimeOffset:], uint64(c.firstTime()))
	binary.LittleEndian.PutUint64(header[chunkHeaderLastTimeOffset:], uint64(c.lastTime()))
	_, err := w.Write(header)
	return err
}

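// offsetForChunkIndex returns the byte offset within a series file at which
// the chunk with the given index starts. Each chunk occupies
// chunkHeaderLen+p.chunkLen bytes on disk, so chunk i starts at
// i*(chunkHeaderLen+p.chunkLen).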
func (p *persistence) offsetForChunkIndex(i int) int64 {
	return int64(i * (chunkHeaderLen + p.chunkLen))
}

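// chunkIndexForOffset is the inverse of offsetForChunkIndex. It returns an
// error if the given offset is not a multiple of the on-disk chunk length.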
func (p *persistence) chunkIndexForOffset(offset int64) (int, error) {
	if int(offset)%(chunkHeaderLen+p.chunkLen) != 0 {
		return -1, fmt.Errorf(
			"offset %d is not a multiple of on-disk chunk length %d",
			offset, chunkHeaderLen+p.chunkLen,
		)
	}
	return int(offset) / (chunkHeaderLen + p.chunkLen), nil
}

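// headsFileName returns the path of the heads file within the base path.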
func (p *persistence) headsFileName() string {
	return path.Join(p.basePath, headsFileName)
}

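// headsTempFileName returns the path of the temporary heads file within the
// base path.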
func (p *persistence) headsTempFileName() string {
	return path.Join(p.basePath, headsTempFileName)
}

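// processIndexingQueue drains p.indexingQueue, collecting the queued ops into
// in-memory batches for the labelPairToFingerprints and labelNameToLabelValues
// indexes. A batch is committed once it reaches indexingMaxBatchSize, after
// indexingBatchTimeout of inactivity, or when a flush request arrives via
// p.indexingFlush (the reply to a flush request is the number of ops still
// waiting in the queue). The method returns once p.indexingQueue is closed and
// the final batch has been committed, closing p.indexingStopped to signal
// completion.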
func (p *persistence) processIndexingQueue() {
	batchSize := 0
	nameToValues := index.LabelNameLabelValuesMapping{}
	pairToFPs := index.LabelPairFingerprintsMapping{}
	batchTimeout := time.NewTimer(indexingBatchTimeout)
	defer batchTimeout.Stop()

	commitBatch := func() {
		p.indexingBatchSizes.Observe(float64(batchSize))
		defer func(begin time.Time) {
			p.indexingBatchLatency.Observe(float64(time.Since(begin) / time.Millisecond))
		}(time.Now())

		if err := p.labelPairToFingerprints.IndexBatch(pairToFPs); err != nil {
			glog.Error("Error indexing label pair to fingerprints batch: ", err)
		}
		if err := p.labelNameToLabelValues.IndexBatch(nameToValues); err != nil {
			glog.Error("Error indexing label name to label values batch: ", err)
		}
		batchSize = 0
		nameToValues = index.LabelNameLabelValuesMapping{}
		pairToFPs = index.LabelPairFingerprintsMapping{}
		batchTimeout.Reset(indexingBatchTimeout)
	}

	var flush chan chan int
loop:
	for {
		// Only process flush requests if the queue is currently empty.
		if len(p.indexingQueue) == 0 {
			flush = p.indexingFlush
		} else {
			flush = nil
		}
		select {
		case <-batchTimeout.C:
			// Only commit if we have something to commit _and_
			// nothing is waiting in the queue to be picked up. That
			// prevents a death spiral if the LookupSet calls below
			// are slow for some reason.
			if batchSize > 0 && len(p.indexingQueue) == 0 {
				commitBatch()
			} else {
				batchTimeout.Reset(indexingBatchTimeout)
			}
		case r := <-flush:
			if batchSize > 0 {
				commitBatch()
			}
			r <- len(p.indexingQueue)
		case op, ok := <-p.indexingQueue:
			if !ok {
				if batchSize > 0 {
					commitBatch()
				}
				break loop
			}

			batchSize++
			for ln, lv := range op.metric {
				lp := metric.LabelPair{Name: ln, Value: lv}
				baseFPs, ok := pairToFPs[lp]
				if !ok {
					var err error
					baseFPs, _, err = p.labelPairToFingerprints.LookupSet(lp)
					if err != nil {
						glog.Errorf("Error looking up label pair %v: %s", lp, err)
						continue
					}
					pairToFPs[lp] = baseFPs
				}
				baseValues, ok := nameToValues[ln]
				if !ok {
					var err error
					baseValues, _, err = p.labelNameToLabelValues.LookupSet(ln)
					if err != nil {
						glog.Errorf("Error looking up label name %v: %s", ln, err)
						continue
					}
					nameToValues[ln] = baseValues
				}
				switch op.opType {
				case add:
					baseFPs[op.fingerprint] = struct{}{}
					baseValues[lv] = struct{}{}
				case remove:
					delete(baseFPs, op.fingerprint)
					if len(baseFPs) == 0 {
						delete(baseValues, lv)
					}
				default:
					panic("unknown op type")
				}
			}

			if batchSize >= indexingMaxBatchSize {
				commitBatch()
			}
		}
	}
	close(p.indexingStopped)
}

// exists returns true when the given file or directory exists.
func exists(path string) (bool, error) {
	_, err := os.Stat(path)
	if err == nil {
		return true, nil
	}
	if os.IsNotExist(err) {
		return false, nil
	}

	return false, err
}