Mirror of https://github.com/prometheus/prometheus.git (synced 2025-01-27 05:32:27 -08:00)
Populate first and last time in the chunk descriptor earlier
The first time is kind of trivial, as we always know it when we create a new chunkDesc. The last time is only known when the chunk is closed, so we have to set it at that point. The change saves a lot of digging down into the chunk itself. The last time in particular is relatively expensive to retrieve, as it involves the creation of an iterator. Accessing the first time now doesn't require locking, which is also a nice gain.
This commit is contained in:
parent 738e6f41d4
commit ef3ab96111
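
Below is a condensed, self-contained sketch of the caching pattern this commit introduces: the first time is fixed at construction and read without locking, while the last time uses a sentinel value until the chunk is closed, after which the cached value replaces the more expensive scan. The chunk, Time, and Earliest types here are simplified stand-ins for Prometheus's model and storage/local types (the real code additionally handles reference counting, eviction, and metrics).

// A condensed, self-contained sketch of the pattern introduced by this commit.
// The chunk, Time, and Earliest types below are simplified stand-ins for
// Prometheus's model and storage/local types, not the real API.
package main

import (
	"fmt"
	"sync"
)

type Time int64

// Earliest mimics model.Earliest, used as the "not yet populated" sentinel.
const Earliest Time = -1 << 62

// chunk is a trivial stand-in: an ordered slice of sample timestamps.
type chunk struct{ times []Time }

// lastTimestamp stands in for the relatively expensive iterator-based lookup.
func (c *chunk) lastTimestamp() Time { return c.times[len(c.times)-1] }

type chunkDesc struct {
	sync.Mutex
	c              *chunk // nil once the chunk is evicted.
	chunkFirstTime Time   // Populated at creation, never modified afterwards.
	chunkLastTime  Time   // Populated when the chunk is closed; Earliest until then.
}

func newChunkDesc(c *chunk, firstTime Time) *chunkDesc {
	return &chunkDesc{c: c, chunkFirstTime: firstTime, chunkLastTime: Earliest}
}

// firstTime needs no lock: the field is written once before the chunkDesc is
// shared and never modified afterwards.
func (cd *chunkDesc) firstTime() Time { return cd.chunkFirstTime }

// lastTime only scans the chunk while it is still open, i.e. while
// chunkLastTime has not been populated yet.
func (cd *chunkDesc) lastTime() Time {
	cd.Lock()
	defer cd.Unlock()
	if cd.chunkLastTime != Earliest || cd.c == nil {
		return cd.chunkLastTime
	}
	return cd.c.lastTimestamp()
}

// maybePopulateLastTime caches the last timestamp once the chunk is closed.
func (cd *chunkDesc) maybePopulateLastTime() {
	cd.Lock()
	defer cd.Unlock()
	if cd.chunkLastTime == Earliest && cd.c != nil {
		cd.chunkLastTime = cd.c.lastTimestamp()
	}
}

func main() {
	c := &chunk{times: []Time{1, 2, 3}}
	cd := newChunkDesc(c, c.times[0])
	fmt.Println(cd.firstTime(), cd.lastTime()) // 1 3 (open chunk: lastTime scans)

	cd.maybePopulateLastTime() // chunk closed: cache the last timestamp
	cd.c = nil                 // even after eviction...
	fmt.Println(cd.lastTime()) // ...the cached 3 is still returned
}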
@@ -56,11 +56,11 @@ const (
 // chunkDesc contains meta-data for a chunk. Many of its methods are
 // goroutine-safe proxies for chunk methods.
 type chunkDesc struct {
-	sync.Mutex
+	sync.Mutex // TODO(beorn7): Try out if an RWMutex would help here.
 	c    chunk // nil if chunk is evicted.
 	rCnt int
-	chunkFirstTime model.Time // Used if chunk is evicted.
-	chunkLastTime  model.Time // Used if chunk is evicted.
+	chunkFirstTime model.Time // Populated at creation.
+	chunkLastTime  model.Time // Populated on closing of the chunk, model.Earliest if unset.
 
 	// evictListElement is nil if the chunk is not in the evict list.
 	// evictListElement is _not_ protected by the chunkDesc mutex.
@@ -71,11 +71,16 @@ type chunkDesc struct {
 // newChunkDesc creates a new chunkDesc pointing to the provided chunk. The
 // provided chunk is assumed to be not persisted yet. Therefore, the refCount of
 // the new chunkDesc is 1 (preventing eviction prior to persisting).
-func newChunkDesc(c chunk) *chunkDesc {
+func newChunkDesc(c chunk, firstTime model.Time) *chunkDesc {
 	chunkOps.WithLabelValues(createAndPin).Inc()
 	atomic.AddInt64(&numMemChunks, 1)
 	numMemChunkDescs.Inc()
-	return &chunkDesc{c: c, rCnt: 1}
+	return &chunkDesc{
+		c:              c,
+		rCnt:           1,
+		chunkFirstTime: firstTime,
+		chunkLastTime:  model.Earliest,
+	}
 }
 
 func (cd *chunkDesc) add(s *model.SamplePair) []chunk {
@@ -124,25 +129,29 @@ func (cd *chunkDesc) refCount() int {
 }
 
 func (cd *chunkDesc) firstTime() model.Time {
-	cd.Lock()
-	defer cd.Unlock()
-
-	if cd.c == nil {
-		return cd.chunkFirstTime
-	}
-	return cd.c.firstTime()
+	// No lock required, will never be modified.
+	return cd.chunkFirstTime
 }
 
 func (cd *chunkDesc) lastTime() model.Time {
 	cd.Lock()
 	defer cd.Unlock()
 
-	if cd.c == nil {
+	if cd.chunkLastTime != model.Earliest || cd.c == nil {
 		return cd.chunkLastTime
 	}
 	return cd.c.newIterator().lastTimestamp()
 }
 
+func (cd *chunkDesc) maybePopulateLastTime() {
+	cd.Lock()
+	defer cd.Unlock()
+
+	if cd.chunkLastTime == model.Earliest && cd.c != nil {
+		cd.chunkLastTime = cd.c.newIterator().lastTimestamp()
+	}
+}
+
 func (cd *chunkDesc) lastSamplePair() *model.SamplePair {
 	cd.Lock()
 	defer cd.Unlock()
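
With model.Earliest doubling as a "not yet populated" sentinel, lastTime() only pays for creating an iterator while the chunk is still open; once chunkLastTime has been set (or the chunk has been evicted), the cached value is returned. firstTime() drops the lock entirely because chunkFirstTime is written once in newChunkDesc and never modified afterwards, and maybePopulateLastTime() is idempotent, so it is safe to call at every point where a chunk may have just been closed.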
@@ -198,8 +207,10 @@ func (cd *chunkDesc) maybeEvict() bool {
 	if cd.rCnt != 0 {
 		return false
 	}
-	cd.chunkFirstTime = cd.c.firstTime()
-	cd.chunkLastTime = cd.c.newIterator().lastTimestamp()
+	// Last opportunity to populate chunkLastTime.
+	if cd.chunkLastTime == model.Earliest {
+		cd.chunkLastTime = cd.c.newIterator().lastTimestamp()
+	}
 	cd.c = nil
 	chunkOps.WithLabelValues(evict).Inc()
 	atomic.AddInt64(&numMemChunks, -1)
@@ -856,7 +856,7 @@ func (p *persistence) loadSeriesMapAndHeads() (sm *seriesMap, chunksToPersist in
 				p.dirty = true
 				return sm, chunksToPersist, nil
 			}
-			chunkDescs[i] = newChunkDesc(chunk)
+			chunkDescs[i] = newChunkDesc(chunk, chunk.firstTime())
 			chunksToPersist++
 		}
 	}
@@ -209,7 +209,7 @@ func newMemorySeries(m model.Metric, chunkDescs []*chunkDesc, modTime time.Time)
 // The caller must have locked the fingerprint of the series.
 func (s *memorySeries) add(v *model.SamplePair) int {
 	if len(s.chunkDescs) == 0 || s.headChunkClosed {
-		newHead := newChunkDesc(newChunk())
+		newHead := newChunkDesc(newChunk(), v.Timestamp)
 		s.chunkDescs = append(s.chunkDescs, newHead)
 		s.headChunkClosed = false
 	} else if s.headChunkUsedByIterator && s.head().refCount() > 1 {
@@ -233,7 +233,12 @@ func (s *memorySeries) add(v *model.SamplePair) int {
 	s.head().c = chunks[0]
 
 	for _, c := range chunks[1:] {
-		s.chunkDescs = append(s.chunkDescs, newChunkDesc(c))
+		s.chunkDescs = append(s.chunkDescs, newChunkDesc(c, c.firstTime()))
+	}
+
+	// Populate lastTime of now-closed chunks.
+	for _, cd := range s.chunkDescs[len(s.chunkDescs)-len(chunks) : len(s.chunkDescs)-1] {
+		cd.maybePopulateLastTime()
 	}
 
 	s.lastTime = v.Timestamp
@@ -254,6 +259,7 @@ func (s *memorySeries) maybeCloseHeadChunk() bool {
 		// Since we cannot modify the head chunk from now on, we
 		// don't need to bother with cloning anymore.
 		s.headChunkUsedByIterator = false
+		s.head().maybePopulateLastTime()
 		return true
 	}
 	return false
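
Taken together, chunkLastTime is now populated in three places: in memorySeries.add when a full head chunk spills over into new chunks, in maybeCloseHeadChunk when the head chunk is closed, and, as a last opportunity, in maybeEvict right before the chunk data is dropped from memory.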