don't use cache for nhcb maps

Signed-off-by: Jeanette Tan <jeanette.tan@grafana.com>
Jeanette Tan authored on 2024-07-03 17:56:48 +08:00; committed by György Krajcsovits
parent 0a321fe4d8
commit 02d5abf60e
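In short: the NHCB (native histogram with custom buckets) conversion maps are removed from the long-lived scrapeCache and are instead allocated as locals inside each append call, so the conversion state is scoped to a single scrape and the explicit resetNhcb step goes away. The following stand-alone Go sketch illustrates that lifecycle change; the types and function names are simplified stand-ins, not the real labels/convertnhcb packages:

package main

import "fmt"

// tempHistogram is a simplified stand-in for convertnhcb.TempHistogram.
type tempHistogram struct {
	bucketCounts map[float64]float64
	count, sum   float64
}

// appendScrape mimics the new shape of scrapeLoop.append: the builder map is
// allocated per call (and only when conversion is enabled) and is dropped when
// the call returns, instead of living on a shared cache that must be reset.
func appendScrape(convertClassicHistograms bool, counts map[uint64]float64) map[uint64]tempHistogram {
	var nhcbBuilder map[uint64]tempHistogram
	if convertClassicHistograms {
		nhcbBuilder = make(map[uint64]tempHistogram)
	}
	for hash, val := range counts {
		if nhcbBuilder == nil {
			continue // conversion disabled: nothing to accumulate
		}
		th, ok := nhcbBuilder[hash]
		if !ok {
			th = tempHistogram{bucketCounts: map[float64]float64{}}
		}
		th.count = val // simplified: treat every input series as a _count
		nhcbBuilder[hash] = th
	}
	return nhcbBuilder
}

func main() {
	fmt.Println(appendScrape(true, map[uint64]float64{1: 42}))  // map[1:{map[] 42 0}]
	fmt.Println(appendScrape(false, map[uint64]float64{1: 42})) // map[] (nil)
}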

@@ -887,9 +887,6 @@ type scrapeCache struct {
 	metadata map[string]*metaEntry
 	metrics  *scrapeMetrics
-	nhcbLabels  map[uint64]labels.Labels
-	nhcbBuilder map[uint64]convertnhcb.TempHistogram
 }

 // metaEntry holds meta information about a metric.
@@ -913,8 +910,6 @@ func newScrapeCache(metrics *scrapeMetrics) *scrapeCache {
 		seriesPrev:  map[uint64]labels.Labels{},
 		metadata:    map[string]*metaEntry{},
 		metrics:     metrics,
-		nhcbLabels:  map[uint64]labels.Labels{},
-		nhcbBuilder: map[uint64]convertnhcb.TempHistogram{},
 	}
 }
@@ -1118,11 +1113,6 @@ func (c *scrapeCache) LengthMetadata() int {
 	return len(c.metadata)
 }

-func (c *scrapeCache) resetNhcb() {
-	c.nhcbLabels = map[uint64]labels.Labels{}
-	c.nhcbBuilder = map[uint64]convertnhcb.TempHistogram{}
-}
-
 func newScrapeLoop(ctx context.Context,
 	sc scraper,
 	l log.Logger,
@@ -1500,6 +1490,8 @@ func (sl *scrapeLoop) append(app storage.Appender, b []byte, contentType string,
 		e               exemplar.Exemplar // escapes to heap so hoisted out of loop
 		meta            metadata.Metadata
 		metadataChanged bool
+		nhcbLabels      map[uint64]labels.Labels
+		nhcbBuilder     map[uint64]convertnhcb.TempHistogram
 	)

 	exemplars := make([]exemplar.Exemplar, 1)
@@ -1529,6 +1521,11 @@ func (sl *scrapeLoop) append(app storage.Appender, b []byte, contentType string,
 	// Take an appender with limits.
 	app = appender(app, sl.sampleLimit, sl.bucketLimit, sl.maxSchema)

+	if sl.convertClassicHistograms {
+		nhcbLabels = make(map[uint64]labels.Labels)
+		nhcbBuilder = make(map[uint64]convertnhcb.TempHistogram)
+	}
+
 	defer func() {
 		if err != nil {
 			return
@@ -1666,16 +1663,16 @@ loop:
 			case strings.HasSuffix(mName, "_bucket") && lset.Has(labels.BucketLabel):
 				le, err := strconv.ParseFloat(lset.Get(labels.BucketLabel), 64)
 				if err == nil && !math.IsNaN(le) {
-					processClassicHistogramSeries(lset, "_bucket", sl.cache, func(hist *convertnhcb.TempHistogram) {
+					processClassicHistogramSeries(lset, "_bucket", nhcbLabels, nhcbBuilder, func(hist *convertnhcb.TempHistogram) {
 						hist.BucketCounts[le] = val
 					})
 				}
 			case strings.HasSuffix(mName, "_count"):
-				processClassicHistogramSeries(lset, "_count", sl.cache, func(hist *convertnhcb.TempHistogram) {
+				processClassicHistogramSeries(lset, "_count", nhcbLabels, nhcbBuilder, func(hist *convertnhcb.TempHistogram) {
 					hist.Count = val
 				})
 			case strings.HasSuffix(mName, "_sum"):
-				processClassicHistogramSeries(lset, "_sum", sl.cache, func(hist *convertnhcb.TempHistogram) {
+				processClassicHistogramSeries(lset, "_sum", nhcbLabels, nhcbBuilder, func(hist *convertnhcb.TempHistogram) {
 					hist.Sum = val
 				})
 			}
@@ -1802,9 +1799,8 @@ loop:
 		})
 	}

-	if sl.convertClassicHistograms {
-		for hash, th := range sl.cache.nhcbBuilder {
-			lset, ok := sl.cache.nhcbLabels[hash]
+	for hash, th := range nhcbBuilder {
+		lset, ok := nhcbLabels[hash]
 		if !ok {
 			continue
 		}
@@ -1815,7 +1811,6 @@ loop:
 		upperBounds, hBase := convertnhcb.ProcessUpperBoundsAndCreateBaseHistogram(ub, false)
 		fhBase := hBase.ToFloat(nil)
 		h, fh := convertnhcb.ConvertHistogramWrapper(th, upperBounds, hBase, fhBase)
-		// fmt.Printf("FINAL lset: %s, timestamp: %v, val: %v\n", lset, defTime, fh)
 		if h != nil {
 			if err := h.Validate(); err != nil {
 				continue
@@ -1832,22 +1827,20 @@ loop:
 			}
 		}
 	}
-		sl.cache.resetNhcb()
-	}
 	return
 }

-func processClassicHistogramSeries(lset labels.Labels, suffix string, cache *scrapeCache, updateHist func(*convertnhcb.TempHistogram)) {
+func processClassicHistogramSeries(lset labels.Labels, suffix string, nhcbLabels map[uint64]labels.Labels, nhcbBuilder map[uint64]convertnhcb.TempHistogram, updateHist func(*convertnhcb.TempHistogram)) {
 	m2 := convertnhcb.GetHistogramMetricBase(lset, suffix)
 	m2hash := m2.Hash()
-	cache.nhcbLabels[m2hash] = m2
-	th, exists := cache.nhcbBuilder[m2hash]
+	nhcbLabels[m2hash] = m2
+	th, exists := nhcbBuilder[m2hash]
 	if !exists {
 		th = convertnhcb.NewTempHistogram()
 	}
 	updateHist(&th)
-	cache.nhcbBuilder[m2hash] = th
+	nhcbBuilder[m2hash] = th
 }

 // Adds samples to the appender, checking the error, and then returns the # of samples added,
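For context on the reworked helper above: processClassicHistogramSeries now writes into whatever maps the caller hands it, and the call sites pass closures that set the bucket, count, or sum of the temporary histogram. A hedged, self-contained approximation of that accumulate-via-callback pattern (stand-in types, not the real convertnhcb API):

package main

import "fmt"

// tempHistogram stands in for convertnhcb.TempHistogram.
type tempHistogram struct {
	bucketCounts map[float64]float64
	count, sum   float64
}

// processSeries mirrors the reworked processClassicHistogramSeries: it writes
// into the map handed to it by the caller instead of into a shared cache.
func processSeries(hash uint64, builder map[uint64]tempHistogram, update func(*tempHistogram)) {
	th, ok := builder[hash]
	if !ok {
		th = tempHistogram{bucketCounts: map[float64]float64{}}
	}
	update(&th)
	builder[hash] = th
}

func main() {
	builder := make(map[uint64]tempHistogram) // per-append, like nhcbBuilder
	val := 3.0
	processSeries(7, builder, func(h *tempHistogram) { h.bucketCounts[0.5] = val }) // _bucket
	processSeries(7, builder, func(h *tempHistogram) { h.count = val })             // _count
	processSeries(7, builder, func(h *tempHistogram) { h.sum = 1.2 })               // _sum
	fmt.Printf("%+v\n", builder[7])
}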