Add enable_compression scrape config option

Currently Prometheus always requests gzip compression from the target when sending scrape requests.
HTTP compression reduces the number of bytes sent over the wire and so is often desirable.
The downside of compression is that it costs extra resources: CPU and memory.

This also affects resource usage on the target, since it has to compress the response
before sending it to Prometheus.
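
For comparison (not part of this change), a Go target built on client_golang can make the same trade-off on its side via promhttp's DisableCompression option; a minimal sketch, with the registry and listen address as placeholders:

package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	reg := prometheus.NewRegistry()
	// DisableCompression makes the handler ignore Accept-Encoding and always
	// send the metrics uncompressed, trading bandwidth for CPU and memory.
	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{
		DisableCompression: true,
	}))
	log.Fatal(http.ListenAndServe(":8080", nil))
}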

This change adds a new option to the scrape job configuration block: enable_compression.
The default is true, so the behaviour stays the same as in current Prometheus.

Setting this option to false allows users to disable compression between Prometheus
and the scraped target. This requires more bandwidth but lowers the resource
usage of both Prometheus and the target.
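
A minimal sketch of the option in use; it assumes config.Load, the helper that LoadFile in the tests below wraps:

package main

import (
	"fmt"

	"github.com/go-kit/log"
	"github.com/prometheus/prometheus/config"
)

func main() {
	yml := `
scrape_configs:
  - job_name: prometheus
    enable_compression: false
    static_configs:
      - targets: ['localhost:8080']
`
	cfg, err := config.Load(yml, false, log.NewNopLogger())
	if err != nil {
		panic(err)
	}
	// Prints "false"; omitting enable_compression would yield the default, true.
	fmt.Println(cfg.ScrapeConfigs[0].EnableCompression)
}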

Fixes #12319.

Signed-off-by: Łukasz Mierzwa <l.mierzwa@gmail.com>
Author: Łukasz Mierzwa
Date: 2023-11-20 12:02:53 +00:00
parent 9509ad082a
commit 870627fbed

6 changed files with 192 additions and 18 deletions


@@ -158,6 +158,7 @@ var (
HonorLabels: false,
HonorTimestamps: true,
HTTPClientConfig: config.DefaultHTTPClientConfig,
EnableCompression: true,
}
// DefaultAlertmanagerConfig is the default alertmanager configuration.
@@ -582,6 +583,8 @@ type ScrapeConfig struct {
MetricsPath string `yaml:"metrics_path,omitempty"`
// The URL scheme with which to fetch metrics from targets.
Scheme string `yaml:"scheme,omitempty"`
// Indicates whether to request a compressed response from the target.
EnableCompression bool `yaml:"enable_compression"`
// An uncompressed response body larger than this many bytes will cause the
// scrape to fail. 0 means no limit.
BodySizeLimit units.Base2Bytes `yaml:"body_size_limit,omitempty"`
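
Note the tag has no omitempty, so an explicit false survives a marshal/unmarshal round trip (the new loader test below relies on this). The default comes from the usual pre-fill pattern; a simplified sketch of the unmarshalling, omitting the validation the real method performs:

func (c *ScrapeConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
	// Start from DefaultScrapeConfig, so omitting enable_compression in the
	// YAML leaves EnableCompression at its default, true.
	*c = DefaultScrapeConfig
	// The plain alias drops the method set to avoid infinite recursion.
	type plain ScrapeConfig
	return unmarshal((*plain)(c))
}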


@@ -186,6 +186,7 @@ var expectedConf = &Config{
HonorTimestamps: true,
ScrapeInterval: model.Duration(15 * time.Second),
ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
EnableCompression: true,
BodySizeLimit: globBodySizeLimit,
SampleLimit: globSampleLimit,
TargetLimit: globTargetLimit,
@@ -288,6 +289,7 @@ var expectedConf = &Config{
HonorTimestamps: true,
ScrapeInterval: model.Duration(50 * time.Second),
ScrapeTimeout: model.Duration(5 * time.Second),
EnableCompression: true,
BodySizeLimit: 10 * units.MiB,
SampleLimit: 1000,
TargetLimit: 35,
@@ -384,6 +386,7 @@ var expectedConf = &Config{
HonorTimestamps: true,
ScrapeInterval: model.Duration(15 * time.Second),
ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
EnableCompression: true,
BodySizeLimit: globBodySizeLimit,
SampleLimit: globSampleLimit,
TargetLimit: globTargetLimit,
@@ -438,6 +441,7 @@ var expectedConf = &Config{
HonorTimestamps: true,
ScrapeInterval: model.Duration(15 * time.Second),
ScrapeTimeout: model.Duration(10 * time.Second),
EnableCompression: true,
BodySizeLimit: globBodySizeLimit,
SampleLimit: globSampleLimit,
TargetLimit: globTargetLimit,
@@ -470,6 +474,7 @@ var expectedConf = &Config{
HonorTimestamps: true,
ScrapeInterval: model.Duration(15 * time.Second),
ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
EnableCompression: true,
BodySizeLimit: globBodySizeLimit,
SampleLimit: globSampleLimit,
TargetLimit: globTargetLimit,
@@ -508,6 +513,7 @@ var expectedConf = &Config{
HonorTimestamps: true,
ScrapeInterval: model.Duration(15 * time.Second),
ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
EnableCompression: true,
BodySizeLimit: globBodySizeLimit,
SampleLimit: globSampleLimit,
TargetLimit: globTargetLimit,
@@ -546,6 +552,7 @@ var expectedConf = &Config{
HonorTimestamps: true,
ScrapeInterval: model.Duration(15 * time.Second),
ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
EnableCompression: true,
BodySizeLimit: globBodySizeLimit,
SampleLimit: globSampleLimit,
TargetLimit: globTargetLimit,
@@ -573,6 +580,7 @@ var expectedConf = &Config{
HonorTimestamps: true,
ScrapeInterval: model.Duration(15 * time.Second),
ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
EnableCompression: true,
BodySizeLimit: globBodySizeLimit,
SampleLimit: globSampleLimit,
TargetLimit: globTargetLimit,
@@ -609,6 +617,7 @@ var expectedConf = &Config{
HonorTimestamps: true,
ScrapeInterval: model.Duration(15 * time.Second),
ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
EnableCompression: true,
BodySizeLimit: globBodySizeLimit,
SampleLimit: globSampleLimit,
TargetLimit: globTargetLimit,
@@ -642,6 +651,7 @@ var expectedConf = &Config{
HonorTimestamps: true,
ScrapeInterval: model.Duration(15 * time.Second),
ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
EnableCompression: true,
BodySizeLimit: globBodySizeLimit,
SampleLimit: globSampleLimit,
TargetLimit: globTargetLimit,
@@ -682,6 +692,7 @@ var expectedConf = &Config{
HonorTimestamps: true,
ScrapeInterval: model.Duration(15 * time.Second),
ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
EnableCompression: true,
BodySizeLimit: globBodySizeLimit,
SampleLimit: globSampleLimit,
TargetLimit: globTargetLimit,
@@ -712,6 +723,7 @@ var expectedConf = &Config{
HonorTimestamps: true,
ScrapeInterval: model.Duration(15 * time.Second),
ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
EnableCompression: true,
BodySizeLimit: globBodySizeLimit,
SampleLimit: globSampleLimit,
TargetLimit: globTargetLimit,
@@ -745,6 +757,7 @@ var expectedConf = &Config{
HonorTimestamps: true,
ScrapeInterval: model.Duration(15 * time.Second),
ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
EnableCompression: true,
BodySizeLimit: globBodySizeLimit,
SampleLimit: globSampleLimit,
TargetLimit: globTargetLimit,
@@ -771,6 +784,7 @@ var expectedConf = &Config{
HonorTimestamps: true,
ScrapeInterval: model.Duration(15 * time.Second),
ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
EnableCompression: true,
BodySizeLimit: globBodySizeLimit,
SampleLimit: globSampleLimit,
TargetLimit: globTargetLimit,
@@ -800,6 +814,7 @@ var expectedConf = &Config{
HonorTimestamps: false,
ScrapeInterval: model.Duration(15 * time.Second),
ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
EnableCompression: true,
BodySizeLimit: globBodySizeLimit,
SampleLimit: globSampleLimit,
TargetLimit: globTargetLimit,
@@ -829,6 +844,7 @@ var expectedConf = &Config{
HonorTimestamps: true,
ScrapeInterval: model.Duration(15 * time.Second),
ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
EnableCompression: true,
BodySizeLimit: globBodySizeLimit,
SampleLimit: globSampleLimit,
TargetLimit: globTargetLimit,
@@ -858,6 +874,7 @@ var expectedConf = &Config{
HonorTimestamps: true,
ScrapeInterval: model.Duration(15 * time.Second),
ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
EnableCompression: true,
BodySizeLimit: globBodySizeLimit,
SampleLimit: globSampleLimit,
TargetLimit: globTargetLimit,
@@ -884,6 +901,7 @@ var expectedConf = &Config{
HonorTimestamps: true,
ScrapeInterval: model.Duration(15 * time.Second),
ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
EnableCompression: true,
BodySizeLimit: globBodySizeLimit,
SampleLimit: globSampleLimit,
TargetLimit: globTargetLimit,
@@ -918,6 +936,7 @@ var expectedConf = &Config{
HonorTimestamps: true,
ScrapeInterval: model.Duration(15 * time.Second),
ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
EnableCompression: true,
BodySizeLimit: globBodySizeLimit,
SampleLimit: globSampleLimit,
TargetLimit: globTargetLimit,
@@ -951,6 +970,7 @@ var expectedConf = &Config{
HonorTimestamps: true,
ScrapeInterval: model.Duration(15 * time.Second),
ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
EnableCompression: true,
BodySizeLimit: globBodySizeLimit,
SampleLimit: globSampleLimit,
TargetLimit: globTargetLimit,
@@ -980,6 +1000,7 @@ var expectedConf = &Config{
HonorTimestamps: true,
ScrapeInterval: model.Duration(15 * time.Second),
ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
EnableCompression: true,
BodySizeLimit: globBodySizeLimit,
SampleLimit: globSampleLimit,
TargetLimit: globTargetLimit,
@@ -1009,6 +1030,7 @@ var expectedConf = &Config{
HonorTimestamps: true,
ScrapeInterval: model.Duration(15 * time.Second),
ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
EnableCompression: true,
BodySizeLimit: globBodySizeLimit,
SampleLimit: globSampleLimit,
TargetLimit: globTargetLimit,
@@ -1042,6 +1064,7 @@ var expectedConf = &Config{
HonorTimestamps: true,
ScrapeInterval: model.Duration(15 * time.Second),
ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
EnableCompression: true,
BodySizeLimit: globBodySizeLimit,
SampleLimit: globSampleLimit,
TargetLimit: globTargetLimit,
@@ -1078,6 +1101,7 @@ var expectedConf = &Config{
HonorTimestamps: true,
ScrapeInterval: model.Duration(15 * time.Second),
ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
EnableCompression: true,
BodySizeLimit: globBodySizeLimit,
SampleLimit: globSampleLimit,
TargetLimit: globTargetLimit,
@@ -1133,6 +1157,7 @@ var expectedConf = &Config{
HonorTimestamps: true,
ScrapeInterval: model.Duration(15 * time.Second),
ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
EnableCompression: true,
BodySizeLimit: globBodySizeLimit,
SampleLimit: globSampleLimit,
TargetLimit: globTargetLimit,
@@ -1159,6 +1184,7 @@ var expectedConf = &Config{
HonorTimestamps: true,
ScrapeInterval: model.Duration(15 * time.Second),
ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
EnableCompression: true,
BodySizeLimit: globBodySizeLimit,
SampleLimit: globSampleLimit,
TargetLimit: globTargetLimit,
@@ -1196,6 +1222,7 @@ var expectedConf = &Config{
HonorTimestamps: true,
ScrapeInterval: model.Duration(15 * time.Second),
ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
EnableCompression: true,
BodySizeLimit: globBodySizeLimit,
SampleLimit: globSampleLimit,
TargetLimit: globTargetLimit,
@@ -1239,6 +1266,7 @@ var expectedConf = &Config{
HonorTimestamps: true,
ScrapeInterval: model.Duration(15 * time.Second),
ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
EnableCompression: true,
BodySizeLimit: globBodySizeLimit,
SampleLimit: globSampleLimit,
TargetLimit: globTargetLimit,
@@ -1273,6 +1301,7 @@ var expectedConf = &Config{
HonorTimestamps: true,
ScrapeInterval: model.Duration(15 * time.Second),
ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
EnableCompression: true,
BodySizeLimit: globBodySizeLimit,
SampleLimit: globSampleLimit,
TargetLimit: globTargetLimit,
@@ -1301,6 +1330,7 @@ var expectedConf = &Config{
HonorTimestamps: true,
ScrapeInterval: model.Duration(15 * time.Second),
ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
EnableCompression: true,
BodySizeLimit: globBodySizeLimit,
SampleLimit: globSampleLimit,
TargetLimit: globTargetLimit,
@@ -1332,6 +1362,7 @@ var expectedConf = &Config{
HonorTimestamps: true,
ScrapeInterval: model.Duration(15 * time.Second),
ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
EnableCompression: true,
BodySizeLimit: globBodySizeLimit,
SampleLimit: globSampleLimit,
TargetLimit: globTargetLimit,
@@ -2060,9 +2091,10 @@ func TestGetScrapeConfigs(t *testing.T) {
ScrapeTimeout: scrapeTimeout,
ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols,
-MetricsPath: "/metrics",
-Scheme: "http",
-HTTPClientConfig: config.DefaultHTTPClientConfig,
+MetricsPath: "/metrics",
+Scheme: "http",
+EnableCompression: true,
+HTTPClientConfig: config.DefaultHTTPClientConfig,
ServiceDiscoveryConfigs: discovery.Configs{
discovery.StaticConfig{
{
@@ -2118,6 +2150,8 @@ func TestGetScrapeConfigs(t *testing.T) {
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
EnableCompression: true,
HTTPClientConfig: config.HTTPClientConfig{
TLSConfig: config.TLSConfig{
CertFile: filepath.FromSlash("testdata/scrape_configs/valid_cert_file"),
@@ -2158,6 +2192,8 @@ func TestGetScrapeConfigs(t *testing.T) {
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
EnableCompression: true,
ServiceDiscoveryConfigs: discovery.Configs{
&vultr.SDConfig{
HTTPClientConfig: config.HTTPClientConfig{
@@ -2210,3 +2246,16 @@ func kubernetesSDHostURL() config.URL {
	tURL, _ := url.Parse("https://localhost:1234")
	return config.URL{URL: tURL}
}

func TestScrapeConfigDisableCompression(t *testing.T) {
	want, err := LoadFile("testdata/scrape_config_disable_compression.good.yml", false, false, log.NewNopLogger())
	require.NoError(t, err)

	out, err := yaml.Marshal(want)
	require.NoError(t, err)

	got := &Config{}
	require.NoError(t, yaml.UnmarshalStrict(out, got))

	require.Equal(t, false, got.ScrapeConfigs[0].EnableCompression)
}


@@ -0,0 +1,5 @@
scrape_configs:
  - job_name: prometheus
    static_configs:
      - targets: ['localhost:8080']
    enable_compression: false


@@ -237,6 +237,10 @@ job_name: <job_name>
params:
  [ <string>: [<string>, ...] ]

# If enable_compression is set to "false", Prometheus will request an
# uncompressed response from the scraped target.
[ enable_compression: <boolean> | default = true ]

# Sets the `Authorization` header on every scrape request with the
# configured username and password.
# password and password_file are mutually exclusive.


@@ -109,6 +109,7 @@ type scrapeLoopOptions struct {
scrapeClassicHistograms bool
mrc []*relabel.Config
cache *scrapeCache
enableCompression bool
}
const maxAheadTime = 10 * time.Minute
@@ -163,6 +164,7 @@ func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, offsetSeed
offsetSeed,
opts.honorTimestamps,
opts.trackTimestampsStaleness,
opts.enableCompression,
opts.sampleLimit,
opts.bucketLimit,
opts.labelLimits,
@@ -275,6 +277,7 @@ func (sp *scrapePool) reload(cfg *config.ScrapeConfig) error {
}
honorLabels = sp.config.HonorLabels
honorTimestamps = sp.config.HonorTimestamps
enableCompression = sp.config.EnableCompression
trackTimestampsStaleness = sp.config.TrackTimestampsStaleness
mrc = sp.config.MetricRelabelConfigs
)
@@ -295,11 +298,12 @@ func (sp *scrapePool) reload(cfg *config.ScrapeConfig) error {
interval, timeout, err := t.intervalAndTimeout(interval, timeout)
var (
s = &targetScraper{
-Target: t,
-client: sp.client,
-timeout: timeout,
-bodySizeLimit: bodySizeLimit,
-acceptHeader: acceptHeader(cfg.ScrapeProtocols),
+Target: t,
+client: sp.client,
+timeout: timeout,
+bodySizeLimit: bodySizeLimit,
+acceptHeader: acceptHeader(cfg.ScrapeProtocols),
+acceptEncodingHeader: acceptEncodingHeader(enableCompression),
}
newLoop = sp.newLoop(scrapeLoopOptions{
target: t,
@@ -309,6 +313,7 @@ func (sp *scrapePool) reload(cfg *config.ScrapeConfig) error {
labelLimits: labelLimits,
honorLabels: honorLabels,
honorTimestamps: honorTimestamps,
enableCompression: enableCompression,
trackTimestampsStaleness: trackTimestampsStaleness,
mrc: mrc,
cache: cache,
@@ -403,6 +408,7 @@ func (sp *scrapePool) sync(targets []*Target) {
}
honorLabels = sp.config.HonorLabels
honorTimestamps = sp.config.HonorTimestamps
enableCompression = sp.config.EnableCompression
trackTimestampsStaleness = sp.config.TrackTimestampsStaleness
mrc = sp.config.MetricRelabelConfigs
scrapeClassicHistograms = sp.config.ScrapeClassicHistograms
@@ -419,12 +425,13 @@ func (sp *scrapePool) sync(targets []*Target) {
var err error
interval, timeout, err = t.intervalAndTimeout(interval, timeout)
s := &targetScraper{
-Target: t,
-client: sp.client,
-timeout: timeout,
-bodySizeLimit: bodySizeLimit,
-acceptHeader: acceptHeader(sp.config.ScrapeProtocols),
-metrics: sp.metrics,
+Target: t,
+client: sp.client,
+timeout: timeout,
+bodySizeLimit: bodySizeLimit,
+acceptHeader: acceptHeader(sp.config.ScrapeProtocols),
+acceptEncodingHeader: acceptEncodingHeader(enableCompression),
+metrics: sp.metrics,
}
l := sp.newLoop(scrapeLoopOptions{
target: t,
@@ -434,6 +441,7 @@ func (sp *scrapePool) sync(targets []*Target) {
labelLimits: labelLimits,
honorLabels: honorLabels,
honorTimestamps: honorTimestamps,
enableCompression: enableCompression,
trackTimestampsStaleness: trackTimestampsStaleness,
mrc: mrc,
interval: interval,
@@ -647,8 +655,9 @@ type targetScraper struct {
gzipr *gzip.Reader
buf *bufio.Reader
-bodySizeLimit int64
-acceptHeader string
+bodySizeLimit int64
+acceptHeader string
+acceptEncodingHeader string
metrics *scrapeMetrics
}
@@ -670,6 +679,13 @@ func acceptHeader(sps []config.ScrapeProtocol) string {
	return strings.Join(vals, ",")
}
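
// acceptEncodingHeader returns the Accept-Encoding value to send with scrape
// requests: "gzip" when compression is enabled, otherwise "identity", which
// requests an unencoded response.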
func acceptEncodingHeader(enableCompression bool) string {
	if enableCompression {
		return "gzip"
	}
	return "identity"
}
var UserAgent = fmt.Sprintf("Prometheus/%s", version.Version)
func (s *targetScraper) scrape(ctx context.Context) (*http.Response, error) {
@@ -679,7 +695,7 @@ func (s *targetScraper) scrape(ctx context.Context) (*http.Response, error) {
return nil, err
}
req.Header.Add("Accept", s.acceptHeader)
-req.Header.Add("Accept-Encoding", "gzip")
+req.Header.Add("Accept-Encoding", s.acceptEncodingHeader)
req.Header.Set("User-Agent", UserAgent)
req.Header.Set("X-Prometheus-Scrape-Timeout-Seconds", strconv.FormatFloat(s.timeout.Seconds(), 'f', -1, 64))
@@ -765,6 +781,7 @@ type scrapeLoop struct {
offsetSeed uint64
honorTimestamps bool
trackTimestampsStaleness bool
enableCompression bool
forcedErr error
forcedErrMtx sync.Mutex
sampleLimit int
@@ -1055,6 +1072,7 @@ func newScrapeLoop(ctx context.Context,
offsetSeed uint64,
honorTimestamps bool,
trackTimestampsStaleness bool,
enableCompression bool,
sampleLimit int,
bucketLimit int,
labelLimits *labelLimits,
@@ -1102,6 +1120,7 @@ func newScrapeLoop(ctx context.Context,
appenderCtx: appenderCtx,
honorTimestamps: honorTimestamps,
trackTimestampsStaleness: trackTimestampsStaleness,
enableCompression: enableCompression,
sampleLimit: sampleLimit,
bucketLimit: bucketLimit,
labelLimits: labelLimits,


@@ -651,6 +651,7 @@ func TestScrapeLoopStopBeforeRun(t *testing.T) {
nil, nil, 0,
true,
false,
true,
0, 0,
nil,
1,
@@ -726,6 +727,7 @@ func TestScrapeLoopStop(t *testing.T) {
0,
true,
false,
true,
0, 0,
nil,
10*time.Millisecond,
@@ -805,6 +807,7 @@ func TestScrapeLoopRun(t *testing.T) {
0,
true,
false,
true,
0, 0,
nil,
time.Second,
@@ -863,6 +866,7 @@ func TestScrapeLoopRun(t *testing.T) {
0,
true,
false,
true,
0, 0,
nil,
time.Second,
@@ -925,6 +929,7 @@ func TestScrapeLoopForcedErr(t *testing.T) {
0,
true,
false,
true,
0, 0,
nil,
time.Second,
@@ -986,6 +991,7 @@ func TestScrapeLoopMetadata(t *testing.T) {
0,
true,
false,
true,
0, 0,
nil,
0,
@@ -1046,6 +1052,7 @@ func simpleTestScrapeLoop(t testing.TB) (context.Context, *scrapeLoop) {
0,
true,
false,
true,
0, 0,
nil,
0,
@@ -1109,6 +1116,7 @@ func TestScrapeLoopFailWithInvalidLabelsAfterRelabel(t *testing.T) {
0,
true,
false,
true,
0, 0,
nil,
0,
@@ -1190,6 +1198,7 @@ func TestScrapeLoopRunCreatesStaleMarkersOnFailedScrape(t *testing.T) {
0,
true,
false,
true,
0, 0,
nil,
10*time.Millisecond,
@@ -1256,6 +1265,7 @@ func TestScrapeLoopRunCreatesStaleMarkersOnParseFailure(t *testing.T) {
0,
true,
false,
true,
0, 0,
nil,
10*time.Millisecond,
@@ -1325,6 +1335,7 @@ func TestScrapeLoopCache(t *testing.T) {
0,
true,
false,
true,
0, 0,
nil,
10*time.Millisecond,
@@ -1411,6 +1422,7 @@ func TestScrapeLoopCacheMemoryExhaustionProtection(t *testing.T) {
0,
true,
false,
true,
0, 0,
nil,
10*time.Millisecond,
@@ -1528,6 +1540,7 @@ func TestScrapeLoopAppend(t *testing.T) {
0,
true,
false,
true,
0, 0,
nil,
0,
@@ -1626,7 +1639,7 @@ func TestScrapeLoopAppendForConflictingPrefixedLabels(t *testing.T) {
},
nil,
func(ctx context.Context) storage.Appender { return app },
-nil, 0, true, false, 0, 0, nil, 0, 0, false, false, false, nil, false, newTestScrapeMetrics(t),
+nil, 0, true, false, true, 0, 0, nil, 0, 0, false, false, false, nil, false, newTestScrapeMetrics(t),
)
slApp := sl.appender(context.Background())
_, _, _, err := sl.append(slApp, []byte(tc.exposedLabels), "", time.Date(2000, 1, 1, 1, 0, 0, 0, time.UTC))
@@ -1658,6 +1671,7 @@ func TestScrapeLoopAppendCacheEntryButErrNotFound(t *testing.T) {
0,
true,
false,
true,
0, 0,
nil,
0,
@@ -1719,6 +1733,7 @@ func TestScrapeLoopAppendSampleLimit(t *testing.T) {
0,
true,
false,
true,
app.limit, 0,
nil,
0,
@@ -1799,6 +1814,7 @@ func TestScrapeLoop_HistogramBucketLimit(t *testing.T) {
0,
true,
false,
true,
app.limit, 0,
nil,
0,
@@ -1900,6 +1916,7 @@ func TestScrapeLoop_ChangingMetricString(t *testing.T) {
0,
true,
false,
true,
0, 0,
nil,
0,
@@ -1951,6 +1968,7 @@ func TestScrapeLoopAppendStaleness(t *testing.T) {
0,
true,
false,
true,
0, 0,
nil,
0,
@@ -2005,6 +2023,7 @@ func TestScrapeLoopAppendNoStalenessIfTimestamp(t *testing.T) {
0,
true,
false,
true,
0, 0,
nil,
0,
@@ -2049,6 +2068,7 @@ func TestScrapeLoopAppendStalenessIfTrackTimestampStaleness(t *testing.T) {
0,
true,
true,
true,
0, 0,
nil,
0,
@@ -2421,6 +2441,7 @@ metric: <
0,
true,
false,
true,
0, 0,
nil,
0,
@@ -2511,6 +2532,7 @@ func TestScrapeLoopAppendExemplarSeries(t *testing.T) {
0,
true,
false,
true,
0, 0,
nil,
0,
@@ -2566,6 +2588,7 @@ func TestScrapeLoopRunReportsTargetDownOnScrapeError(t *testing.T) {
0,
true,
false,
true,
0, 0,
nil,
10*time.Millisecond,
@@ -2605,6 +2628,7 @@ func TestScrapeLoopRunReportsTargetDownOnInvalidUTF8(t *testing.T) {
0,
true,
false,
true,
0, 0,
nil,
10*time.Millisecond,
@@ -2657,6 +2681,7 @@ func TestScrapeLoopAppendGracefullyIfAmendOrOutOfOrderOrOutOfBounds(t *testing.T
0,
true,
false,
true,
0, 0,
nil,
0,
@@ -2705,6 +2730,7 @@ func TestScrapeLoopOutOfBoundsTimeError(t *testing.T) {
0,
true,
false,
true,
0, 0,
nil,
0,
@@ -2997,6 +3023,7 @@ func TestScrapeLoop_RespectTimestamps(t *testing.T) {
nil, 0,
true,
false,
true,
0, 0,
nil,
0,
@@ -3041,6 +3068,7 @@ func TestScrapeLoop_DiscardTimestamps(t *testing.T) {
nil, 0,
false,
false,
true,
0, 0,
nil,
0,
@@ -3084,6 +3112,7 @@ func TestScrapeLoopDiscardDuplicateLabels(t *testing.T) {
0,
true,
false,
true,
0, 0,
nil,
0,
@@ -3145,6 +3174,7 @@ func TestScrapeLoopDiscardUnnamedMetrics(t *testing.T) {
0,
true,
false,
true,
0, 0,
nil,
0,
@@ -3411,6 +3441,7 @@ func TestScrapeAddFast(t *testing.T) {
0,
true,
false,
true,
0, 0,
nil,
0,
@@ -3500,6 +3531,7 @@ func TestScrapeReportSingleAppender(t *testing.T) {
0,
true,
false,
true,
0, 0,
nil,
10*time.Millisecond,
@@ -3705,6 +3737,7 @@ func TestScrapeLoopLabelLimit(t *testing.T) {
0,
true,
false,
true,
0, 0,
&test.labelLimits,
0,
@@ -3911,6 +3944,7 @@ func TestScrapeLoopRunCreatesStaleMarkersOnFailedScrapeForTimestampedMetrics(t *
0,
true,
true,
true,
0, 0,
nil,
10*time.Millisecond,
@@ -3956,3 +3990,63 @@ func TestScrapeLoopRunCreatesStaleMarkersOnFailedScrapeForTimestampedMetrics(t *
	require.True(t, value.IsStaleNaN(appender.resultFloats[6].f),
		"Appended second sample not as expected. Wanted: stale NaN Got: %x", math.Float64bits(appender.resultFloats[6].f))
}

func TestScrapeLoopCompression(t *testing.T) {
	simpleStorage := teststorage.New(t)
	defer simpleStorage.Close()

	metricsText := makeTestMetrics(10)

	for _, tc := range []struct {
		enableCompression bool
		acceptEncoding    string
	}{
		{
			enableCompression: true,
			acceptEncoding:    "gzip",
		},
		{
			enableCompression: false,
			acceptEncoding:    "identity",
		},
	} {
		t.Run(fmt.Sprintf("compression=%v,acceptEncoding=%s", tc.enableCompression, tc.acceptEncoding), func(t *testing.T) {
			scraped := make(chan bool)

			ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
				require.Equal(t, tc.acceptEncoding, r.Header.Get("Accept-Encoding"), "invalid value of the Accept-Encoding header")
				fmt.Fprint(w, metricsText)
				close(scraped)
			}))
			defer ts.Close()

			config := &config.ScrapeConfig{
				JobName:           "test",
				SampleLimit:       100,
				Scheme:            "http",
				ScrapeInterval:    model.Duration(100 * time.Millisecond),
				ScrapeTimeout:     model.Duration(100 * time.Millisecond),
				EnableCompression: tc.enableCompression,
			}

			sp, err := newScrapePool(config, simpleStorage, 0, nil, &Options{}, newTestScrapeMetrics(t))
			require.NoError(t, err)
			defer sp.stop()

			testURL, err := url.Parse(ts.URL)
			require.NoError(t, err)
			sp.Sync([]*targetgroup.Group{
				{
					Targets: []model.LabelSet{{model.AddressLabel: model.LabelValue(testURL.Host)}},
				},
			})
			require.Equal(t, 1, len(sp.ActiveTargets()))

			select {
			case <-time.After(5 * time.Second):
				t.Fatalf("target was not scraped")
			case <-scraped:
			}
		})
	}
}