From bec5abc4dc63831dfbfba50bc3230a6fea6b78e0 Mon Sep 17 00:00:00 2001
From: Bryan Boreham
Date: Tue, 20 Dec 2022 16:54:07 +0000
Subject: [PATCH 1/7] scrape: remove unsafe code

The `yoloString` routine was intended to avoid an allocation when
converting from a `[]byte` to a `string` for map lookup. However, since
2014 Go has recognized this pattern and does not make a copy of the
data when looking up a map. So the unsafe code is not necessary.

In line with this, constants like `scrapeHealthMetricName` also become
`[]byte`.

Signed-off-by: Bryan Boreham
---
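A minimal, self-contained sketch of the map-lookup optimization this commit
relies on (illustrative only; the names below are invented and not taken from
the Prometheus code). Indexing a map directly with string(b) performs the
lookup without copying b, whereas converting to a separate string variable
first would allocate:

    package main

    import "fmt"

    // lookupNoCopy indexes the map with string(b); the compiler recognizes
    // that the converted string is only used for the lookup and does not
    // copy b. Writing s := string(b); m[s] would allocate instead.
    func lookupNoCopy(m map[string]int, b []byte) (int, bool) {
    	v, ok := m[string(b)] // no allocation here
    	return v, ok
    }

    func main() {
    	m := map[string]int{"up": 1}
    	fmt.Println(lookupNoCopy(m, []byte("up")))
    }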
 scrape/scrape.go      | 57 ++++++++++++++++++++-----------------------
 scrape/scrape_test.go | 10 ++++----
 2 files changed, 31 insertions(+), 36 deletions(-)

diff --git a/scrape/scrape.go b/scrape/scrape.go
index 10e9119a26..c57e7b4062 100644
--- a/scrape/scrape.go
+++ b/scrape/scrape.go
@@ -27,7 +27,6 @@ import (
 	"strconv"
 	"sync"
 	"time"
-	"unsafe"

 	"github.com/go-kit/log"
 	"github.com/go-kit/log/level"
@@ -1006,8 +1005,8 @@ func (c *scrapeCache) iterDone(flushCache bool) {
 	}
 }

-func (c *scrapeCache) get(met string) (*cacheEntry, bool) {
-	e, ok := c.series[met]
+func (c *scrapeCache) get(met []byte) (*cacheEntry, bool) {
+	e, ok := c.series[string(met)]
 	if !ok {
 		return nil, false
 	}
@@ -1015,11 +1014,11 @@ func (c *scrapeCache) get(met string) (*cacheEntry, bool) {
 	return e, true
 }

-func (c *scrapeCache) addRef(met string, ref storage.SeriesRef, lset labels.Labels, hash uint64) {
+func (c *scrapeCache) addRef(met []byte, ref storage.SeriesRef, lset labels.Labels, hash uint64) {
 	if ref == 0 {
 		return
 	}
-	c.series[met] = &cacheEntry{ref: ref, lastIter: c.iter, lset: lset, hash: hash}
+	c.series[string(met)] = &cacheEntry{ref: ref, lastIter: c.iter, lset: lset, hash: hash}
 }

 func (c *scrapeCache) addDropped(met string) {
 	iter := c.iter
 	c.droppedSeries[met] = &iter
 }

-func (c *scrapeCache) getDropped(met string) bool {
-	iterp, ok := c.droppedSeries[met]
+func (c *scrapeCache) getDropped(met []byte) bool {
+	iterp, ok := c.droppedSeries[string(met)]
 	if ok {
 		*iterp = c.iter
 	}
@@ -1052,7 +1051,7 @@ func (c *scrapeCache) forEachStale(f func(labels.Labels) bool) {
 func (c *scrapeCache) setType(metric []byte, t textparse.MetricType) {
 	c.metaMtx.Lock()

-	e, ok := c.metadata[yoloString(metric)]
+	e, ok := c.metadata[string(metric)]
 	if !ok {
 		e = &metaEntry{Metadata: metadata.Metadata{Type: textparse.MetricTypeUnknown}}
 		c.metadata[string(metric)] = e
@@ -1069,12 +1068,12 @@ func (c *scrapeCache) setType(metric []byte, t textparse.MetricType) {
 func (c *scrapeCache) setHelp(metric, help []byte) {
 	c.metaMtx.Lock()

-	e, ok := c.metadata[yoloString(metric)]
+	e, ok := c.metadata[string(metric)]
 	if !ok {
 		e = &metaEntry{Metadata: metadata.Metadata{Type: textparse.MetricTypeUnknown}}
 		c.metadata[string(metric)] = e
 	}
-	if e.Help != yoloString(help) {
+	if e.Help != string(help) {
 		e.Help = string(help)
 		e.lastIterChange = c.iter
 	}
@@ -1086,12 +1085,12 @@ func (c *scrapeCache) setHelp(metric, help []byte) {
 func (c *scrapeCache) setUnit(metric, unit []byte) {
 	c.metaMtx.Lock()

-	e, ok := c.metadata[yoloString(metric)]
+	e, ok := c.metadata[string(metric)]
 	if !ok {
 		e = &metaEntry{Metadata: metadata.Metadata{Type: textparse.MetricTypeUnknown}}
 		c.metadata[string(metric)] = e
 	}
-	if e.Unit != yoloString(unit) {
+	if e.Unit != string(unit) {
 		e.Unit = string(unit)
 		e.lastIterChange = c.iter
 	}
@@ -1509,7 +1508,7 @@ func (sl *scrapeLoop) append(app storage.Appender, b []byte, contentType string,

 			sl.cache.metaMtx.Lock()
 			defer sl.cache.metaMtx.Unlock()
-			metaEntry, metaOk := sl.cache.metadata[yoloString([]byte(lset.Get(labels.MetricName)))]
+			metaEntry, metaOk := sl.cache.metadata[lset.Get(labels.MetricName)]
 			if metaOk && (isNewSeries || metaEntry.lastIterChange == sl.cache.iter) {
 				metadataChanged = true
 				meta.Type = metaEntry.Type
@@ -1584,10 +1583,10 @@ loop:
 		meta = metadata.Metadata{}
 		metadataChanged = false

-		if sl.cache.getDropped(yoloString(met)) {
+		if sl.cache.getDropped(met) {
 			continue
 		}
-		ce, ok := sl.cache.get(yoloString(met))
+		ce, ok := sl.cache.get(met)
 		var (
 			ref  storage.SeriesRef
 			lset labels.Labels
@@ -1654,7 +1653,7 @@ loop:
 				// Bypass staleness logic if there is an explicit timestamp.
 				sl.cache.trackStaleness(hash, lset)
 			}
-			sl.cache.addRef(mets, ref, lset, hash)
+			sl.cache.addRef(met, ref, lset, hash)
 			if sampleAdded && sampleLimitErr == nil {
 				seriesAdded++
 			}
@@ -1719,10 +1718,6 @@ loop:
 	return
 }

-func yoloString(b []byte) string {
-	return *((*string)(unsafe.Pointer(&b)))
-}
-
 // Adds samples to the appender, checking the error, and then returns the # of samples added,
 // whether the caller should continue to process more samples, and any sample limit errors.
 func (sl *scrapeLoop) checkAddError(ce *cacheEntry, met []byte, tp *int64, err error, sampleLimitErr *error, appErrs *appendErrors) (bool, error) {
@@ -1775,15 +1770,15 @@ func (sl *scrapeLoop) checkAddExemplarError(err error, e exemplar.Exemplar, appE

 // The constants are suffixed with the invalid \xff unicode rune to avoid collisions
 // with scraped metrics in the cache.
-const (
-	scrapeHealthMetricName        = "up" + "\xff"
-	scrapeDurationMetricName      = "scrape_duration_seconds" + "\xff"
-	scrapeSamplesMetricName       = "scrape_samples_scraped" + "\xff"
-	samplesPostRelabelMetricName  = "scrape_samples_post_metric_relabeling" + "\xff"
-	scrapeSeriesAddedMetricName   = "scrape_series_added" + "\xff"
-	scrapeTimeoutMetricName       = "scrape_timeout_seconds" + "\xff"
-	scrapeSampleLimitMetricName   = "scrape_sample_limit" + "\xff"
-	scrapeBodySizeBytesMetricName = "scrape_body_size_bytes" + "\xff"
+var (
+	scrapeHealthMetricName        = []byte("up" + "\xff")
+	scrapeDurationMetricName      = []byte("scrape_duration_seconds" + "\xff")
+	scrapeSamplesMetricName       = []byte("scrape_samples_scraped" + "\xff")
+	samplesPostRelabelMetricName  = []byte("scrape_samples_post_metric_relabeling" + "\xff")
+	scrapeSeriesAddedMetricName   = []byte("scrape_series_added" + "\xff")
+	scrapeTimeoutMetricName       = []byte("scrape_timeout_seconds" + "\xff")
+	scrapeSampleLimitMetricName   = []byte("scrape_sample_limit" + "\xff")
+	scrapeBodySizeBytesMetricName = []byte("scrape_body_size_bytes" + "\xff")
 )

 func (sl *scrapeLoop) report(app storage.Appender, start time.Time, duration time.Duration, scraped, added, seriesAdded, bytes int, scrapeErr error) (err error) {
@@ -1859,7 +1854,7 @@ func (sl *scrapeLoop) reportStale(app storage.Appender, start time.Time) (err er
 	return
 }

-func (sl *scrapeLoop) addReportSample(app storage.Appender, s string, t int64, v float64) error {
+func (sl *scrapeLoop) addReportSample(app storage.Appender, s []byte, t int64, v float64) error {
 	ce, ok := sl.cache.get(s)
 	var ref storage.SeriesRef
 	var lset labels.Labels
@@ -1870,7 +1865,7 @@ func (sl *scrapeLoop) addReportSample(app storage.Appender, s string, t int64, v
 		// The constants are suffixed with the invalid \xff unicode rune to avoid collisions
 		// with scraped metrics in the cache.
 		// We have to drop it when building the actual metric.
-		lset = labels.FromStrings(labels.MetricName, s[:len(s)-1])
+		lset = labels.FromStrings(labels.MetricName, string(s[:len(s)-1]))
 		lset = sl.reportSampleMutator(lset)
 	}

diff --git a/scrape/scrape_test.go b/scrape/scrape_test.go
index 4fa871a3e2..384a30627e 100644
--- a/scrape/scrape_test.go
+++ b/scrape/scrape_test.go
@@ -1586,21 +1586,21 @@ func TestScrapeLoopAppendCacheEntryButErrNotFound(t *testing.T) {
 	fakeRef := storage.SeriesRef(1)
 	expValue := float64(1)
-	metric := `metric{n="1"} 1`
-	p, warning := textparse.New([]byte(metric), "")
+	metric := []byte(`metric{n="1"} 1`)
+	p, warning := textparse.New(metric, "")
 	require.NoError(t, warning)

 	var lset labels.Labels
 	p.Next()
-	mets := p.Metric(&lset)
+	p.Metric(&lset)
 	hash := lset.Hash()

 	// Create a fake entry in the cache
-	sl.cache.addRef(mets, fakeRef, lset, hash)
+	sl.cache.addRef(metric, fakeRef, lset, hash)
 	now := time.Now()

 	slApp := sl.appender(context.Background())
-	_, _, _, err := sl.append(slApp, []byte(metric), "", now)
+	_, _, _, err := sl.append(slApp, metric, "", now)
 	require.NoError(t, err)
 	require.NoError(t, slApp.Commit())

From 89539c35c9ffd139eff079b0fe8fa7e03814f274 Mon Sep 17 00:00:00 2001
From: Levi Harrison
Date: Thu, 29 Dec 2022 14:18:42 -0500
Subject: [PATCH 2/7] Remove nomad `datacenter` field in configuration docs

Signed-off-by: Levi Harrison
---
 docs/configuration/configuration.md | 1 -
 1 file changed, 1 deletion(-)

diff --git a/docs/configuration/configuration.md b/docs/configuration/configuration.md
index 058a33da26..dee7cfce87 100644
--- a/docs/configuration/configuration.md
+++ b/docs/configuration/configuration.md
@@ -2418,7 +2418,6 @@ The following meta labels are available on targets during [relabeling](#relabel_
 # The information to access the Nomad API. It is to be defined
 # as the Nomad documentation requires.
 [ allow_stale: <boolean> | default = true ]
-[ datacenter: <string> ]
 [ namespace: <string> | default = default ]
 [ refresh_interval: <duration> | default = 60s ]
 [ region: <string> | default = global ]

From d228d1d9cc48025e27000073cbb8cbf77dec28cc Mon Sep 17 00:00:00 2001
From: Bryan Boreham
Date: Wed, 4 Jan 2023 12:05:42 +0000
Subject: [PATCH 3/7] scrape: remove 'mets' string completely

This makes all usage of maps in scrape.go consistent.

Also remove comment about unsafe strings, since we don't use them any
more in this package.

Signed-off-by: Bryan Boreham
---
 scrape/scrape.go | 12 +++++-------
 1 file changed, 5 insertions(+), 7 deletions(-)

diff --git a/scrape/scrape.go b/scrape/scrape.go
index c57e7b4062..6160ae36fb 100644
--- a/scrape/scrape.go
+++ b/scrape/scrape.go
@@ -916,8 +916,7 @@ type scrapeCache struct {
 	series map[string]*cacheEntry

 	// Cache of dropped metric strings and their iteration. The iteration must
-	// be a pointer so we can update it without setting a new entry with an unsafe
-	// string in addDropped().
+	// be a pointer so we can update it.
 	droppedSeries map[string]*uint64

 	// seriesCur and seriesPrev store the labels of series that were seen
@@ -1021,9 +1020,9 @@ func (c *scrapeCache) addRef(met []byte, ref storage.SeriesRef, lset labels.Labe
 	c.series[string(met)] = &cacheEntry{ref: ref, lastIter: c.iter, lset: lset, hash: hash}
 }

-func (c *scrapeCache) addDropped(met string) {
+func (c *scrapeCache) addDropped(met []byte) {
 	iter := c.iter
-	c.droppedSeries[met] = &iter
+	c.droppedSeries[string(met)] = &iter
 }

 func (c *scrapeCache) getDropped(met []byte) bool {
@@ -1590,7 +1589,6 @@ loop:
 		var (
 			ref  storage.SeriesRef
 			lset labels.Labels
-			mets string
 			hash uint64
 		)

@@ -1601,7 +1599,7 @@ loop:
 			// Update metadata only if it changed in the current iteration.
 			updateMetadata(lset, false)
 		} else {
-			mets = p.Metric(&lset)
+			p.Metric(&lset)
 			hash = lset.Hash()

 			// Hash label set as it is seen local to the target. Then add target labels
@@ -1610,7 +1608,7 @@ loop:

 			// The label set may be set to empty to indicate dropping.
 			if lset.IsEmpty() {
-				sl.cache.addDropped(mets)
+				sl.cache.addDropped(met)
 				continue
 			}

From 2f58be840d06e647dfdbfa952e3451c0b6afee50 Mon Sep 17 00:00:00 2001
From: Bryan Boreham
Date: Thu, 22 Dec 2022 11:19:06 +0000
Subject: [PATCH 4/7] service discovery: add config name to log messages

This makes it easier to connect a log message with the config it
relates to. Each SD config has a name, either the scrape job name or
something like "config-0" for Alertmanager config.

Signed-off-by: Bryan Boreham
---
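A minimal sketch of the resulting logger wiring, using go-kit's log.With the
same way the change below does; the discoverer type and config name values
here are hypothetical, not taken from a real configuration:

    package main

    import (
    	"os"

    	"github.com/go-kit/log"
    	"github.com/go-kit/log/level"
    )

    func main() {
    	logger := log.NewLogfmtLogger(os.Stderr)
    	// Hypothetical values: in the manager, typ comes from cfg.Name() and
    	// setName is the scrape job name (or e.g. "config-0" for Alertmanager).
    	typ, setName := "file_sd", "node-exporters"
    	sdLogger := log.With(logger, "discovery", typ, "config", setName)
    	// Emits a line roughly like:
    	//   level=error discovery=file_sd config=node-exporters msg="Cannot create service discovery" err=example
    	level.Error(sdLogger).Log("msg", "Cannot create service discovery", "err", "example")
    }

With the extra "config" key attached, the failing config can be identified
directly from the log line.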
 discovery/legacymanager/manager.go | 4 ++--
 discovery/manager.go               | 4 ++--
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/discovery/legacymanager/manager.go b/discovery/legacymanager/manager.go
index 7a3d6b3b82..87823f4010 100644
--- a/discovery/legacymanager/manager.go
+++ b/discovery/legacymanager/manager.go
@@ -311,10 +311,10 @@ func (m *Manager) registerProviders(cfgs discovery.Configs, setName string) int
 		}
 		typ := cfg.Name()
 		d, err := cfg.NewDiscoverer(discovery.DiscovererOptions{
-			Logger: log.With(m.logger, "discovery", typ),
+			Logger: log.With(m.logger, "discovery", typ, "config", setName),
 		})
 		if err != nil {
-			level.Error(m.logger).Log("msg", "Cannot create service discovery", "err", err, "type", typ)
+			level.Error(m.logger).Log("msg", "Cannot create service discovery", "err", err, "type", typ, "config", setName)
 			failed++
 			return
 		}
diff --git a/discovery/manager.go b/discovery/manager.go
index b7357fa6cd..8b304a0faf 100644
--- a/discovery/manager.go
+++ b/discovery/manager.go
@@ -428,11 +428,11 @@ func (m *Manager) registerProviders(cfgs Configs, setName string) int {
 		}
 		typ := cfg.Name()
 		d, err := cfg.NewDiscoverer(DiscovererOptions{
-			Logger: log.With(m.logger, "discovery", typ),
+			Logger: log.With(m.logger, "discovery", typ, "config", setName),
 			HTTPClientOptions: m.httpOpts,
 		})
 		if err != nil {
-			level.Error(m.logger).Log("msg", "Cannot create service discovery", "err", err, "type", typ)
+			level.Error(m.logger).Log("msg", "Cannot create service discovery", "err", err, "type", typ, "config", setName)
 			failed++
 			return
 		}

From 78d3c4e823dcf39d8c9be2046bdda6f5da0b5d69 Mon Sep 17 00:00:00 2001
From: Mingjie Shao
Date: Mon, 16 Jan 2023 16:19:31 +0800
Subject: [PATCH 5/7] tsdb: Fixed typo in Histogram

Signed-off-by: Mingjie Shao
---
 tsdb/chunkenc/histogram.go | 4 ++--
 tsdb/head_append.go        | 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/tsdb/chunkenc/histogram.go b/tsdb/chunkenc/histogram.go
index b962e45a4a..6ceb866a52 100644
--- a/tsdb/chunkenc/histogram.go
+++ b/tsdb/chunkenc/histogram.go
@@ -590,9 +590,9 @@ func (a *HistogramAppender) Recode(
 	return hc, app
 }

-// RecodeHistogramm converts the current histogram (in-place) to accommodate an expansion of the set of
+// RecodeHistogram converts the current histogram (in-place) to accommodate an expansion of the set of
 // (positive and/or negative) buckets used.
-func (a *HistogramAppender) RecodeHistogramm(
+func (a *HistogramAppender) RecodeHistogram(
 	h *histogram.Histogram,
 	pBackwardInter, nBackwardInter []Interjection,
 ) {
diff --git a/tsdb/head_append.go b/tsdb/head_append.go
index c62f4ffecb..d4534c6695 100644
--- a/tsdb/head_append.go
+++ b/tsdb/head_append.go
@@ -1172,7 +1172,7 @@ func (s *memSeries) appendHistogram(t int64, h *histogram.Histogram, appendID ui
 	if len(pBackwardInter)+len(nBackwardInter) > 0 {
 		h.PositiveSpans = pMergedSpans
 		h.NegativeSpans = nMergedSpans
-		app.RecodeHistogramm(h, pBackwardInter, nBackwardInter)
+		app.RecodeHistogram(h, pBackwardInter, nBackwardInter)
 	}
 	// We have 3 cases here
 	// - !okToAppend -> We need to cut a new chunk.

From faac4c066d724d913ec5cf9f683ee94be5b61c5b Mon Sep 17 00:00:00 2001
From: Bryan Boreham
Date: Tue, 20 Dec 2022 17:59:22 +0000
Subject: [PATCH 6/7] package storage: fix up test not to access Labels internals

Signed-off-by: Bryan Boreham
---
 storage/series_test.go | 10 ++--------
 1 file changed, 2 insertions(+), 8 deletions(-)

diff --git a/storage/series_test.go b/storage/series_test.go
index 8a2675e352..1f68d23b96 100644
--- a/storage/series_test.go
+++ b/storage/series_test.go
@@ -76,10 +76,7 @@ func TestChunkSeriesSetToSeriesSet(t *testing.T) {
 		samples []tsdbutil.Sample
 	}{
 		{
-			lbs: labels.Labels{
-				{Name: "__name__", Value: "up"},
-				{Name: "instance", Value: "localhost:8080"},
-			},
+			lbs: labels.FromStrings("__name__", "up", "instance", "localhost:8080"),
 			samples: []tsdbutil.Sample{
 				sample{t: 1, v: 1},
 				sample{t: 2, v: 2},
 				sample{t: 3, v: 3},
@@ -87,10 +84,7 @@ func TestChunkSeriesSetToSeriesSet(t *testing.T) {
 				sample{t: 4, v: 4},
 			},
 		}, {
-			lbs: labels.Labels{
-				{Name: "__name__", Value: "up"},
-				{Name: "instance", Value: "localhost:8081"},
-			},
+			lbs: labels.FromStrings("__name__", "up", "instance", "localhost:8081"),
 			samples: []tsdbutil.Sample{
 				sample{t: 1, v: 2},
 				sample{t: 2, v: 3},

From 6e560fe19ba528f10fa1417ea1ae90da0e5ac38a Mon Sep 17 00:00:00 2001
From: Ganesh Vernekar
Date: Tue, 17 Jan 2023 16:53:49 +0530
Subject: [PATCH 7/7] tsdb: Avoid unnecessary allocation from 11779

Signed-off-by: Ganesh Vernekar
---
 tsdb/head_wal.go | 1 -
 1 file changed, 1 deletion(-)

diff --git a/tsdb/head_wal.go b/tsdb/head_wal.go
index 9bc6985d3e..708541364c 100644
--- a/tsdb/head_wal.go
+++ b/tsdb/head_wal.go
@@ -499,7 +499,6 @@ func (h *Head) resetSeriesWithMMappedChunks(mSeries *memSeries, mmc, oooMmc []*m
 	h.metrics.chunksRemoved.Add(float64(len(mSeries.mmappedChunks)))
 	h.metrics.chunks.Add(float64(len(mmc) + len(oooMmc) - len(mSeries.mmappedChunks)))
 	mSeries.mmappedChunks = mmc
-	mSeries.ooo = nil
 	if len(oooMmc) == 0 {
 		mSeries.ooo = nil
 	} else {