Mirror of https://github.com/prometheus/prometheus.git
Fix wording from "jitterSeed" -> "offsetSeed" for server-wide scrape offsets
In digital communication, "jitter" usually refers to how much a signal deviates from true periodicity, see https://en.wikipedia.org/wiki/Jitter. The way we are using the "jitterSeed" in Prometheus does not affect the true periodicity at all, but just introduces a constant phase shift (or offset) within the period. So it would be more correct and less confusing to call the "jitterSeed" an "offsetSeed" instead.

Signed-off-by: Julius Volz <julius.volz@gmail.com>
parent d0fea47a9c
commit 199cfc1324
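For background on what the renamed seed does: setOffsetSeed (in the diff below) hashes the server's FQDN together with the configured external labels into a uint64, and each target combines that seed with its own hash to pick a fixed phase within its scrape interval, so scrapes stay strictly periodic but start at different points in time across an HA pair. A minimal standalone Go sketch of that idea follows; it is not the actual Prometheus implementation, and the helper names offsetSeed/scrapeOffset as well as the example inputs are purely illustrative.

package main

import (
	"fmt"
	"hash/fnv"
	"time"
)

// offsetSeed sketches the server-wide seed: an FNV-64a hash of the hostname
// plus the serialized external labels, mirroring Manager.setOffsetSeed below.
func offsetSeed(hostname, externalLabels string) uint64 {
	h := fnv.New64a()
	fmt.Fprintf(h, "%s%s", hostname, externalLabels)
	return h.Sum64()
}

// scrapeOffset derives a constant phase shift within the scrape interval from
// the seed and a per-target hash. The period itself is unchanged: scrapes still
// fire exactly one interval apart, just shifted by this fixed offset.
func scrapeOffset(interval time.Duration, seed, targetHash uint64) time.Duration {
	return time.Duration((seed + targetHash) % uint64(interval))
}

func main() {
	// Hypothetical hostname, external labels, and target hashes.
	seed := offsetSeed("prometheus-1.example.org", "{env=\"prod\"}")
	for _, targetHash := range []uint64{12345, 9876543210} {
		fmt.Println(scrapeOffset(15*time.Second, seed, targetHash))
	}
}

Because the seed is server-wide, every target on one replica is shifted by the same amount relative to the other replica, which is why the test below only checks that the seed is non-zero and that different external label sets produce different seeds.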
@@ -150,7 +150,7 @@ type Manager struct {
 	append    storage.Appendable
 	graceShut chan struct{}
 
-	jitterSeed uint64 // Global jitterSeed seed is used to spread scrape workload across HA setup.
+	offsetSeed uint64 // Global offsetSeed seed is used to spread scrape workload across HA setup.
 	mtxScrape sync.Mutex // Guards the fields below.
 	scrapeConfigs map[string]*config.ScrapeConfig
 	scrapePools map[string]*scrapePool
@@ -214,7 +214,7 @@ func (m *Manager) reload() {
 				level.Error(m.logger).Log("msg", "error reloading target set", "err", "invalid config id:"+setName)
 				continue
 			}
-			sp, err := newScrapePool(scrapeConfig, m.append, m.jitterSeed, log.With(m.logger, "scrape_pool", setName), m.opts)
+			sp, err := newScrapePool(scrapeConfig, m.append, m.offsetSeed, log.With(m.logger, "scrape_pool", setName), m.opts)
 			if err != nil {
 				level.Error(m.logger).Log("msg", "error creating new scrape pool", "err", err, "scrape_pool", setName)
 				continue
@@ -234,8 +234,8 @@ func (m *Manager) reload() {
 	wg.Wait()
 }
 
-// setJitterSeed calculates a global jitterSeed per server relying on extra label set.
-func (m *Manager) setJitterSeed(labels labels.Labels) error {
+// setOffsetSeed calculates a global offsetSeed per server relying on extra label set.
+func (m *Manager) setOffsetSeed(labels labels.Labels) error {
 	h := fnv.New64a()
 	hostname, err := osutil.GetFQDN()
 	if err != nil {
@@ -244,7 +244,7 @@ func (m *Manager) setJitterSeed(labels labels.Labels) error {
 	if _, err := fmt.Fprintf(h, "%s%s", hostname, labels.String()); err != nil {
 		return err
 	}
-	m.jitterSeed = h.Sum64()
+	m.offsetSeed = h.Sum64()
 	return nil
 }
 
@@ -281,7 +281,7 @@ func (m *Manager) ApplyConfig(cfg *config.Config) error {
 	}
 	m.scrapeConfigs = c
 
-	if err := m.setJitterSeed(cfg.GlobalConfig.ExternalLabels); err != nil {
+	if err := m.setOffsetSeed(cfg.GlobalConfig.ExternalLabels); err != nil {
 		return err
 	}
 
@@ -596,7 +596,7 @@ func TestManagerTargetsUpdates(t *testing.T) {
 	}
 }
 
-func TestSetJitter(t *testing.T) {
+func TestSetOffsetSeed(t *testing.T) {
 	getConfig := func(prometheus string) *config.Config {
 		cfgText := `
 global:
@@ -617,24 +617,24 @@ global:
 
 	// Load the first config.
 	cfg1 := getConfig("ha1")
-	if err := scrapeManager.setJitterSeed(cfg1.GlobalConfig.ExternalLabels); err != nil {
+	if err := scrapeManager.setOffsetSeed(cfg1.GlobalConfig.ExternalLabels); err != nil {
 		t.Error(err)
 	}
-	jitter1 := scrapeManager.jitterSeed
+	offsetSeed1 := scrapeManager.offsetSeed
 
-	if jitter1 == 0 {
-		t.Error("Jitter has to be a hash of uint64")
+	if offsetSeed1 == 0 {
+		t.Error("Offset seed has to be a hash of uint64")
 	}
 
 	// Load the first config.
 	cfg2 := getConfig("ha2")
-	if err := scrapeManager.setJitterSeed(cfg2.GlobalConfig.ExternalLabels); err != nil {
+	if err := scrapeManager.setOffsetSeed(cfg2.GlobalConfig.ExternalLabels); err != nil {
 		t.Error(err)
 	}
-	jitter2 := scrapeManager.jitterSeed
+	offsetSeed2 := scrapeManager.offsetSeed
 
-	if jitter1 == jitter2 {
-		t.Error("Jitter should not be the same on different set of external labels")
+	if offsetSeed1 == offsetSeed2 {
+		t.Error("Offset seed should not be the same on different set of external labels")
 	}
 }
 
@@ -279,7 +279,7 @@ const maxAheadTime = 10 * time.Minute
 // returning an empty label set is interpreted as "drop"
 type labelsMutator func(labels.Labels) labels.Labels
 
-func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, jitterSeed uint64, logger log.Logger, options *Options) (*scrapePool, error) {
+func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, offsetSeed uint64, logger log.Logger, options *Options) (*scrapePool, error) {
 	targetScrapePools.Inc()
 	if logger == nil {
 		logger = log.NewNopLogger()
@@ -325,7 +325,7 @@ func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, jitterSeed
 			func(l labels.Labels) labels.Labels { return mutateReportSampleLabels(l, opts.target) },
 			func(ctx context.Context) storage.Appender { return app.Appender(ctx) },
 			cache,
-			jitterSeed,
+			offsetSeed,
 			opts.honorTimestamps,
 			opts.sampleLimit,
 			opts.bucketLimit,
@@ -775,7 +775,7 @@ func appender(app storage.Appender, sampleLimit, bucketLimit int) storage.Append
 type scraper interface {
 	scrape(ctx context.Context, w io.Writer) (string, error)
 	Report(start time.Time, dur time.Duration, err error)
-	offset(interval time.Duration, jitterSeed uint64) time.Duration
+	offset(interval time.Duration, offsetSeed uint64) time.Duration
 }
 
 // targetScraper implements the scraper interface for a target.
@@ -891,7 +891,7 @@ type scrapeLoop struct {
 	cache *scrapeCache
 	lastScrapeSize int
 	buffers *pool.Pool
-	jitterSeed uint64
+	offsetSeed uint64
 	honorTimestamps bool
 	forcedErr error
 	forcedErrMtx sync.Mutex
@@ -1175,7 +1175,7 @@ func newScrapeLoop(ctx context.Context,
 	reportSampleMutator labelsMutator,
 	appender func(ctx context.Context) storage.Appender,
 	cache *scrapeCache,
-	jitterSeed uint64,
+	offsetSeed uint64,
 	honorTimestamps bool,
 	sampleLimit int,
 	bucketLimit int,
@@ -1217,7 +1217,7 @@ func newScrapeLoop(ctx context.Context,
 		sampleMutator: sampleMutator,
 		reportSampleMutator: reportSampleMutator,
 		stopped: make(chan struct{}),
-		jitterSeed: jitterSeed,
+		offsetSeed: offsetSeed,
 		l: l,
 		parentCtx: ctx,
 		appenderCtx: appenderCtx,
@@ -1238,7 +1238,7 @@ func newScrapeLoop(ctx context.Context,
 
 func (sl *scrapeLoop) run(errc chan<- error) {
 	select {
-	case <-time.After(sl.scraper.offset(sl.interval, sl.jitterSeed)):
+	case <-time.After(sl.scraper.offset(sl.interval, sl.offsetSeed)):
 		// Continue after a scraping offset.
 	case <-sl.ctx.Done():
 		close(sl.stopped)
@@ -59,7 +59,7 @@ func TestTargetLabels(t *testing.T) {
 
 func TestTargetOffset(t *testing.T) {
 	interval := 10 * time.Second
-	jitter := uint64(0)
+	offsetSeed := uint64(0)
 
 	offsets := make([]time.Duration, 10000)
 
@@ -68,7 +68,7 @@ func TestTargetOffset(t *testing.T) {
 		target := newTestTarget("example.com:80", 0, labels.FromStrings(
 			"label", fmt.Sprintf("%d", i),
 		))
-		offsets[i] = target.offset(interval, jitter)
+		offsets[i] = target.offset(interval, offsetSeed)
 	}
 
 	// Put the offsets into buckets and validate that they are all