Mirror of https://github.com/prometheus/prometheus.git
Merge pull request #11448 from codesome/gateproto
Gate protobuf scraping and update help text for enable-feature
Commit 8d045058c8
cmd/prometheus/main.go

@@ -198,6 +198,7 @@ func (c *flagConfig) setFeatureListOptions(logger log.Logger) error {
 			level.Info(logger).Log("msg", "No default port will be appended to scrape targets' addresses.")
 		case "native-histograms":
 			c.tsdb.EnableNativeHistograms = true
+			c.scrape.EnableProtobufNegotiation = true
 			level.Info(logger).Log("msg", "Experimental native histogram support enabled.")
 		case "":
 			continue
@@ -400,7 +401,7 @@ func main() {
 	a.Flag("scrape.discovery-reload-interval", "Interval used by scrape manager to throttle target groups updates.").
 		Hidden().Default("5s").SetValue(&cfg.scrape.DiscoveryReloadInterval)
 
-	a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: agent, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-at-modifier, promql-negative-offset, promql-per-step-stats, remote-write-receiver (DEPRECATED), extra-scrape-metrics, new-service-discovery-manager, auto-gomaxprocs, no-default-scrape-port. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details.").
+	a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: agent, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-at-modifier, promql-negative-offset, promql-per-step-stats, remote-write-receiver (DEPRECATED), extra-scrape-metrics, new-service-discovery-manager, auto-gomaxprocs, no-default-scrape-port, native-histograms. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details.").
 		Default("").StringsVar(&cfg.featureList)
 
 	promlogflag.AddFlags(a, &cfg.promlogConfig)
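
Note: as the updated help text indicates, the gate rides on the existing feature-flag mechanism, so no new flag is introduced. An illustrative invocation (config path assumed):

    prometheus --config.file=prometheus.yml --enable-feature=native-histograms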
scrape/manager.go

@@ -132,6 +132,9 @@ type Options struct {
 	// Option to enable the experimental in-memory metadata storage and append
 	// metadata to the WAL.
 	EnableMetadataStorage bool
+	// Option to enable protobuf negotiation with the client. Note that the client can already
+	// send protobuf without needing to enable this.
+	EnableProtobufNegotiation bool
 	// Option to increase the interval used by scrape manager to throttle target groups updates.
 	DiscoveryReloadInterval model.Duration
 
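
Note: programs that embed the scrape manager set this field when constructing it. A minimal sketch, assuming the NewManager signature at this revision; the logger and storage arguments are illustrative:

    import (
        "github.com/go-kit/log"
        "github.com/prometheus/prometheus/scrape"
        "github.com/prometheus/prometheus/storage"
    )

    func newScrapeManager(app storage.Appendable) *scrape.Manager {
        opts := &scrape.Options{
            // Ask targets for protobuf; in Prometheus itself this is flipped
            // by the native-histograms feature flag in main.go.
            EnableProtobufNegotiation: true,
        }
        return scrape.NewManager(opts, log.NewNopLogger(), app)
    }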
scrape/scrape.go

@@ -243,6 +243,8 @@ type scrapePool struct {
 	newLoop func(scrapeLoopOptions) loop
 
 	noDefaultPort bool
+
+	enableProtobufNegotiation bool
 }
 
 type labelLimits struct {
@@ -293,6 +295,7 @@ func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, jitterSeed
 		logger:        logger,
 		httpOpts:      options.HTTPClientOptions,
 		noDefaultPort: options.NoDefaultPort,
+		enableProtobufNegotiation: options.EnableProtobufNegotiation,
 	}
 	sp.newLoop = func(opts scrapeLoopOptions) loop {
 		// Update the targets retrieval function for metadata to a new scrape cache.
@@ -433,8 +436,12 @@ func (sp *scrapePool) reload(cfg *config.ScrapeConfig) error {
 
 		t := sp.activeTargets[fp]
 		interval, timeout, err := t.intervalAndTimeout(interval, timeout)
+		acceptHeader := scrapeAcceptHeader
+		if sp.enableProtobufNegotiation {
+			acceptHeader = scrapeAcceptHeaderWithProtobuf
+		}
 		var (
-			s       = &targetScraper{Target: t, client: sp.client, timeout: timeout, bodySizeLimit: bodySizeLimit}
+			s       = &targetScraper{Target: t, client: sp.client, timeout: timeout, bodySizeLimit: bodySizeLimit, acceptHeader: acceptHeader}
 			newLoop = sp.newLoop(scrapeLoopOptions{
 				target:          t,
 				scraper:         s,
@@ -537,8 +544,11 @@ func (sp *scrapePool) sync(targets []*Target) {
 			// for every target.
 			var err error
 			interval, timeout, err = t.intervalAndTimeout(interval, timeout)
-
-			s := &targetScraper{Target: t, client: sp.client, timeout: timeout, bodySizeLimit: bodySizeLimit}
+			acceptHeader := scrapeAcceptHeader
+			if sp.enableProtobufNegotiation {
+				acceptHeader = scrapeAcceptHeaderWithProtobuf
+			}
+			s := &targetScraper{Target: t, client: sp.client, timeout: timeout, bodySizeLimit: bodySizeLimit, acceptHeader: acceptHeader}
 			l := sp.newLoop(scrapeLoopOptions{
 				target:          t,
 				scraper:         s,
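
Note: the same three-line Accept header selection now appears in both reload and sync. A purely illustrative refactor sketch (the helper below is hypothetical, not part of this change) would hoist it onto scrapePool:

    // acceptHeaderFor is a hypothetical helper, not in this PR: it centralizes
    // the Accept header choice that reload and sync make identically.
    func (sp *scrapePool) acceptHeaderFor() string {
        if sp.enableProtobufNegotiation {
            return scrapeAcceptHeaderWithProtobuf
        }
        return scrapeAcceptHeader
    }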
@@ -757,11 +767,15 @@ type targetScraper struct {
 	buf *bufio.Reader
 
 	bodySizeLimit int64
+	acceptHeader  string
 }
 
 var errBodySizeLimit = errors.New("body size limit exceeded")
 
-const acceptHeader = `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited,application/openmetrics-text;version=1.0.0;q=0.75,text/plain;version=0.0.4;q=0.5,*/*;q=0.1`
+const (
+	scrapeAcceptHeader             = `application/openmetrics-text;version=1.0.0;q=0.75,text/plain;version=0.0.4;q=0.5,*/*;q=0.1`
+	scrapeAcceptHeaderWithProtobuf = `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited,application/openmetrics-text;version=1.0.0;q=0.75,text/plain;version=0.0.4;q=0.5,*/*;q=0.1`
+)
 
 var UserAgent = fmt.Sprintf("Prometheus/%s", version.Version)
@@ -771,7 +785,7 @@ func (s *targetScraper) scrape(ctx context.Context, w io.Writer) (string, error)
 	if err != nil {
 		return "", err
 	}
-	req.Header.Add("Accept", acceptHeader)
+	req.Header.Add("Accept", s.acceptHeader)
 	req.Header.Add("Accept-Encoding", "gzip")
 	req.Header.Set("User-Agent", UserAgent)
 	req.Header.Set("X-Prometheus-Scrape-Timeout-Seconds", strconv.FormatFloat(s.timeout.Seconds(), 'f', -1, 64))
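
Note: put together, a scrape request issued with protobuf negotiation enabled carries headers along these lines (path, version, and timeout values illustrative). The protobuf media type is listed first at implicit q=1, so capable targets prefer it while the text formats remain as fallbacks:

    GET /metrics HTTP/1.1
    Accept: application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited,application/openmetrics-text;version=1.0.0;q=0.75,text/plain;version=0.0.4;q=0.5,*/*;q=0.1
    Accept-Encoding: gzip
    User-Agent: Prometheus/2.40.0
    X-Prometheus-Scrape-Timeout-Seconds: 10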
scrape/scrape_test.go

@@ -2147,12 +2147,16 @@ func TestTargetScraperScrapeOK(t *testing.T) {
 		expectedTimeout = "1.5"
 	)
 
+	var protobufParsing bool
+
 	server := httptest.NewServer(
 		http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-			accept := r.Header.Get("Accept")
-			if !strings.HasPrefix(accept, "application/vnd.google.protobuf;") {
-				t.Errorf("Expected Accept header to prefer application/vnd.google.protobuf, got %q", accept)
-			}
+			if protobufParsing {
+				accept := r.Header.Get("Accept")
+				if !strings.HasPrefix(accept, "application/vnd.google.protobuf;") {
+					t.Errorf("Expected Accept header to prefer application/vnd.google.protobuf, got %q", accept)
+				}
+			}
 
 			timeout := r.Header.Get("X-Prometheus-Scrape-Timeout-Seconds")
 			if timeout != expectedTimeout {
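
Note: the prefix check the test performs is the same one a real target could use to decide what to serve; a minimal sketch of such a handler (endpoint and response bodies illustrative):

    // Hypothetical exporter-side handler: serve protobuf only when the scraper
    // advertises it, mirroring the prefix check in the test above.
    http.HandleFunc("/metrics", func(w http.ResponseWriter, r *http.Request) {
        if strings.HasPrefix(r.Header.Get("Accept"), "application/vnd.google.protobuf;") {
            w.Header().Set("Content-Type", "application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited")
            // ... write length-delimited MetricFamily protobuf messages ...
            return
        }
        w.Header().Set("Content-Type", "text/plain; version=0.0.4")
        // ... write text-format exposition, e.g. "metric_a 1\n" ...
    })

A production exporter would do full q-value content negotiation; the prefix check simply mirrors what the test asserts.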
@@ -2170,6 +2174,7 @@ func TestTargetScraperScrapeOK(t *testing.T) {
 		panic(err)
 	}
 
+	runTest := func(acceptHeader string) {
 		ts := &targetScraper{
 			Target: &Target{
 				labels: labels.FromStrings(
@@ -2179,6 +2184,7 @@ func TestTargetScraperScrapeOK(t *testing.T) {
 			},
 			client:  http.DefaultClient,
 			timeout: configTimeout,
+			acceptHeader: acceptHeader,
 		}
 		var buf bytes.Buffer
 
@@ -2188,6 +2194,11 @@ func TestTargetScraperScrapeOK(t *testing.T) {
 		require.Equal(t, "metric_a 1\nmetric_b 2\n", buf.String())
 	}
 
+	runTest(scrapeAcceptHeader)
+	protobufParsing = true
+	runTest(scrapeAcceptHeaderWithProtobuf)
+}
+
 func TestTargetScrapeScrapeCancel(t *testing.T) {
 	block := make(chan struct{})
@@ -2211,6 +2222,7 @@ func TestTargetScrapeScrapeCancel(t *testing.T) {
 			),
 		},
 		client: http.DefaultClient,
+		acceptHeader: scrapeAcceptHeader,
 	}
 	ctx, cancel := context.WithCancel(context.Background())
 
@@ -2264,6 +2276,7 @@ func TestTargetScrapeScrapeNotFound(t *testing.T) {
 			),
 		},
 		client: http.DefaultClient,
+		acceptHeader: scrapeAcceptHeader,
 	}
 
 	_, err = ts.scrape(context.Background(), io.Discard)
@@ -2305,6 +2318,7 @@ func TestTargetScraperBodySizeLimit(t *testing.T) {
 		},
 		client:        http.DefaultClient,
 		bodySizeLimit: bodySizeLimit,
+		acceptHeader:  scrapeAcceptHeader,
 	}
 	var buf bytes.Buffer
 