Mirror of https://github.com/prometheus/prometheus.git, synced 2024-12-24 21:24:05 -08:00.
Added ability to specify scrape protocols to accept during HTTP content type negotiation. (#12738)

* Added ability to specify scrape protocols to accept during HTTP content type negotiation.
  This is done via a new option in GlobalConfig and ScrapeConfig: "scrape_protocols".

  Signed-off-by: bwplotka <bwplotka@gmail.com>

* Fixed readability and log message.

  Signed-off-by: bwplotka <bwplotka@gmail.com>

---------

Signed-off-by: bwplotka <bwplotka@gmail.com>
This commit is contained in:
parent 4b9c19fe55
commit 624b973ebf
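For orientation before the diff: a minimal sketch of the new option from the consumer side (not part of the commit; the YAML and job name are made up, and it assumes this commit's config package API):

package main

import (
	"fmt"

	"github.com/go-kit/log"
	"github.com/prometheus/prometheus/config"
)

func main() {
	// Hypothetical configuration exercising the new global option.
	cfg, err := config.Load(`
global:
  scrape_protocols: ["PrometheusProto", "OpenMetricsText1.0.0"]
scrape_configs:
  - job_name: demo
`, false, log.NewNopLogger())
	if err != nil {
		panic(err)
	}
	// The preference list is parsed and validated on load; most wanted first.
	fmt.Println(cfg.GlobalConfig.ScrapeProtocols) // [PrometheusProto OpenMetricsText1.0.0]
}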
cmd/prometheus/main.go

@@ -202,8 +202,9 @@ func (c *flagConfig) setFeatureListOptions(logger log.Logger) error {
         level.Info(logger).Log("msg", "No default port will be appended to scrape targets' addresses.")
     case "native-histograms":
         c.tsdb.EnableNativeHistograms = true
-        c.scrape.EnableProtobufNegotiation = true
-        level.Info(logger).Log("msg", "Experimental native histogram support enabled.")
+        // Change global variable. Hacky, but it's hard to pass a new option or default to the unmarshaller.
+        config.DefaultConfig.GlobalConfig.ScrapeProtocols = config.DefaultNativeHistogramScrapeProtocols
+        level.Info(logger).Log("msg", "Experimental native histogram support enabled. Changed default scrape_protocols to prefer PrometheusProto format.", "global.scrape_protocols", fmt.Sprintf("%v", config.DefaultConfig.GlobalConfig.ScrapeProtocols))
     case "":
         continue
     case "promql-at-modifier", "promql-negative-offset":
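Both preference lists the flag toggles between are exported by the config package introduced below, so the switch is easy to observe (standalone sketch, not from the commit):

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/config"
)

func main() {
	// Default preference without the feature flag: OpenMetrics first, no protobuf.
	fmt.Println(config.DefaultScrapeProtocols)
	// [OpenMetricsText1.0.0 OpenMetricsText0.0.1 PrometheusText0.0.4]

	// Preference installed by --enable-feature=native-histograms: the protobuf
	// format moves to the front, since native histograms need it.
	fmt.Println(config.DefaultNativeHistogramScrapeProtocols)
	// [PrometheusProto OpenMetricsText1.0.0 OpenMetricsText0.0.1 PrometheusText0.0.4]
}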
config/config.go (100 changed lines)
@@ -19,6 +19,7 @@ import (
     "net/url"
     "os"
     "path/filepath"
+    "sort"
     "strings"
     "time"
@@ -143,12 +144,14 @@ var (
         ScrapeInterval:     model.Duration(1 * time.Minute),
         ScrapeTimeout:      model.Duration(10 * time.Second),
         EvaluationInterval: model.Duration(1 * time.Minute),
+        // When native histogram feature flag is enabled, ScrapeProtocols default
+        // changes to DefaultNativeHistogramScrapeProtocols.
+        ScrapeProtocols: DefaultScrapeProtocols,
     }

     // DefaultScrapeConfig is the default scrape configuration.
     DefaultScrapeConfig = ScrapeConfig{
-        // ScrapeTimeout and ScrapeInterval default to the configured
-        // globals.
+        // ScrapeTimeout, ScrapeInterval and ScrapeProtocols default to the configured globals.
         ScrapeClassicHistograms: false,
         MetricsPath:             "/metrics",
         Scheme:                  "http",
@@ -260,7 +263,7 @@ func (c Config) String() string {
     return string(b)
 }

-// ScrapeConfigs returns the scrape configurations.
+// GetScrapeConfigs returns the scrape configurations.
 func (c *Config) GetScrapeConfigs() ([]*ScrapeConfig, error) {
     scfgs := make([]*ScrapeConfig, len(c.ScrapeConfigs))
@@ -385,6 +388,11 @@ type GlobalConfig struct {
     ScrapeInterval model.Duration `yaml:"scrape_interval,omitempty"`
     // The default timeout when scraping targets.
     ScrapeTimeout model.Duration `yaml:"scrape_timeout,omitempty"`
+    // The protocols to negotiate during a scrape. It tells clients what
+    // protocols are accepted by Prometheus and with what weight (most wanted is first).
+    // Supported values (case sensitive): PrometheusProto, OpenMetricsText0.0.1,
+    // OpenMetricsText1.0.0, PrometheusText0.0.4.
+    ScrapeProtocols []ScrapeProtocol `yaml:"scrape_protocols,omitempty"`
     // How frequently to evaluate rules by default.
     EvaluationInterval model.Duration `yaml:"evaluation_interval,omitempty"`
     // File to which PromQL queries are logged.
@@ -414,6 +422,68 @@ type GlobalConfig struct {
     KeepDroppedTargets uint `yaml:"keep_dropped_targets,omitempty"`
 }

+// ScrapeProtocol represents supported protocol for scraping metrics.
+type ScrapeProtocol string
+
+// Validate returns an error if the given scrape protocol is not supported.
+func (s ScrapeProtocol) Validate() error {
+    if _, ok := ScrapeProtocolsHeaders[s]; !ok {
+        return fmt.Errorf("unknown scrape protocol %v, supported: %v",
+            s, func() (ret []string) {
+                for k := range ScrapeProtocolsHeaders {
+                    ret = append(ret, string(k))
+                }
+                sort.Strings(ret)
+                return ret
+            }())
+    }
+    return nil
+}
+
+var (
+    PrometheusProto      ScrapeProtocol = "PrometheusProto"
+    PrometheusText0_0_4  ScrapeProtocol = "PrometheusText0.0.4"
+    OpenMetricsText0_0_1 ScrapeProtocol = "OpenMetricsText0.0.1"
+    OpenMetricsText1_0_0 ScrapeProtocol = "OpenMetricsText1.0.0"
+
+    ScrapeProtocolsHeaders = map[ScrapeProtocol]string{
+        PrometheusProto:      "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited",
+        PrometheusText0_0_4:  "text/plain;version=0.0.4",
+        OpenMetricsText0_0_1: "application/openmetrics-text;version=0.0.1",
+        OpenMetricsText1_0_0: "application/openmetrics-text;version=1.0.0",
+    }
+
+    DefaultScrapeProtocols = []ScrapeProtocol{
+        OpenMetricsText1_0_0,
+        OpenMetricsText0_0_1,
+        PrometheusText0_0_4,
+    }
+    DefaultNativeHistogramScrapeProtocols = []ScrapeProtocol{
+        PrometheusProto,
+        OpenMetricsText1_0_0,
+        OpenMetricsText0_0_1,
+        PrometheusText0_0_4,
+    }
+)
+
+// validateAcceptScrapeProtocols returns an error if we see problems with the accept scrape protocols option.
+func validateAcceptScrapeProtocols(sps []ScrapeProtocol) error {
+    if len(sps) == 0 {
+        return errors.New("scrape_protocols cannot be empty")
+    }
+    dups := map[string]struct{}{}
+    for _, sp := range sps {
+        if _, ok := dups[strings.ToLower(string(sp))]; ok {
+            return fmt.Errorf("duplicated protocol in scrape_protocols, got %v", sps)
+        }
+        if err := sp.Validate(); err != nil {
+            return fmt.Errorf("scrape_protocols: %w", err)
+        }
+        dups[strings.ToLower(string(sp))] = struct{}{}
+    }
+    return nil
+}
+
 // SetDirectory joins any relative file paths with dir.
 func (c *GlobalConfig) SetDirectory(dir string) {
     c.QueryLogFile = config.JoinDir(dir, c.QueryLogFile)
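A quick illustration of the validation rules above (standalone sketch, not from the commit): names are matched case-sensitively against ScrapeProtocolsHeaders, and unknown values are reported together with the sorted list of supported ones.

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/config"
)

func main() {
	// A known protocol validates cleanly.
	fmt.Println(config.PrometheusProto.Validate()) // <nil>

	// A lowercase spelling is a different, unknown name.
	fmt.Println(config.ScrapeProtocol("prometheusproto").Validate())
	// unknown scrape protocol prometheusproto, supported:
	// [OpenMetricsText0.0.1 OpenMetricsText1.0.0 PrometheusProto PrometheusText0.0.4]
}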
@@ -459,6 +529,14 @@ func (c *GlobalConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
     if gc.EvaluationInterval == 0 {
         gc.EvaluationInterval = DefaultGlobalConfig.EvaluationInterval
     }
+
+    if gc.ScrapeProtocols == nil {
+        gc.ScrapeProtocols = DefaultGlobalConfig.ScrapeProtocols
+    }
+    if err := validateAcceptScrapeProtocols(gc.ScrapeProtocols); err != nil {
+        return fmt.Errorf("%w for global config", err)
+    }
+
     *c = *gc
     return nil
 }
@@ -469,7 +547,8 @@ func (c *GlobalConfig) isZero() bool {
         c.ScrapeInterval == 0 &&
         c.ScrapeTimeout == 0 &&
         c.EvaluationInterval == 0 &&
-        c.QueryLogFile == ""
+        c.QueryLogFile == "" &&
+        c.ScrapeProtocols == nil
 }

 type ScrapeConfigs struct {
@@ -490,6 +569,11 @@ type ScrapeConfig struct {
     ScrapeInterval model.Duration `yaml:"scrape_interval,omitempty"`
     // The timeout for scraping targets of this config.
     ScrapeTimeout model.Duration `yaml:"scrape_timeout,omitempty"`
+    // The protocols to negotiate during a scrape. It tells clients what
+    // protocols are accepted by Prometheus and with what preference (most wanted is first).
+    // Supported values (case sensitive): PrometheusProto, OpenMetricsText0.0.1,
+    // OpenMetricsText1.0.0, PrometheusText0.0.4.
+    ScrapeProtocols []ScrapeProtocol `yaml:"scrape_protocols,omitempty"`
     // Whether to scrape a classic histogram that is also exposed as a native histogram.
     ScrapeClassicHistograms bool `yaml:"scrape_classic_histograms,omitempty"`
     // The HTTP resource path on which to fetch metrics from targets.
@@ -577,6 +661,7 @@ func (c *ScrapeConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
     return nil
 }

+// Validate validates scrape config, but also fills relevant default values from global config if needed.
 func (c *ScrapeConfig) Validate(globalConfig GlobalConfig) error {
     if c == nil {
         return errors.New("empty or null scrape config section")
@@ -618,6 +703,13 @@ func (c *ScrapeConfig) Validate(globalConfig GlobalConfig) error {
         c.KeepDroppedTargets = globalConfig.KeepDroppedTargets
     }

+    if c.ScrapeProtocols == nil {
+        c.ScrapeProtocols = globalConfig.ScrapeProtocols
+    }
+    if err := validateAcceptScrapeProtocols(c.ScrapeProtocols); err != nil {
+        return fmt.Errorf("%w for scrape config with job name %q", err, c.JobName)
+    }
+
     return nil
 }
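The default-filling above means a job-level list overrides the global one, while jobs that set nothing inherit it. A sketch under the same assumptions as the earlier example (hypothetical job names):

package main

import (
	"fmt"

	"github.com/go-kit/log"
	"github.com/prometheus/prometheus/config"
)

func main() {
	cfg, err := config.Load(`
scrape_configs:
  - job_name: classic-text
    scrape_protocols: ["PrometheusText0.0.4"]
  - job_name: inherits-defaults
`, false, log.NewNopLogger())
	if err != nil {
		panic(err)
	}
	fmt.Println(cfg.ScrapeConfigs[0].ScrapeProtocols) // [PrometheusText0.0.4]
	fmt.Println(cfg.ScrapeConfigs[1].ScrapeProtocols) // [OpenMetricsText1.0.0 OpenMetricsText0.0.1 PrometheusText0.0.4]
}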
config/config_test.go

@@ -92,6 +92,7 @@ var expectedConf = &Config{
         LabelLimit:            globLabelLimit,
         LabelNameLengthLimit:  globLabelNameLengthLimit,
         LabelValueLengthLimit: globLabelValueLengthLimit,
+        ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,
     },

     RuleFiles: []string{

@@ -191,6 +192,7 @@ var expectedConf = &Config{
         LabelLimit:            globLabelLimit,
         LabelNameLengthLimit:  globLabelNameLengthLimit,
         LabelValueLengthLimit: globLabelValueLengthLimit,
+        ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,

         MetricsPath: DefaultScrapeConfig.MetricsPath,
         Scheme:      DefaultScrapeConfig.Scheme,

@@ -292,6 +294,7 @@ var expectedConf = &Config{
         LabelLimit:            35,
         LabelNameLengthLimit:  210,
         LabelValueLengthLimit: 210,
+        ScrapeProtocols:       []ScrapeProtocol{PrometheusText0_0_4},

         HTTPClientConfig: config.HTTPClientConfig{
             BasicAuth: &config.BasicAuth{

@@ -387,6 +390,7 @@ var expectedConf = &Config{
         LabelLimit:            globLabelLimit,
         LabelNameLengthLimit:  globLabelNameLengthLimit,
         LabelValueLengthLimit: globLabelValueLengthLimit,
+        ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,

         MetricsPath: DefaultScrapeConfig.MetricsPath,
         Scheme:      DefaultScrapeConfig.Scheme,

@@ -440,6 +444,7 @@ var expectedConf = &Config{
         LabelLimit:            globLabelLimit,
         LabelNameLengthLimit:  globLabelNameLengthLimit,
         LabelValueLengthLimit: globLabelValueLengthLimit,
+        ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,

         MetricsPath: "/metrics",
         Scheme:      "http",

@@ -471,6 +476,7 @@ var expectedConf = &Config{
         LabelLimit:            globLabelLimit,
         LabelNameLengthLimit:  globLabelNameLengthLimit,
         LabelValueLengthLimit: globLabelValueLengthLimit,
+        ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,

         MetricsPath: DefaultScrapeConfig.MetricsPath,
         Scheme:      DefaultScrapeConfig.Scheme,

@@ -508,6 +514,7 @@ var expectedConf = &Config{
         LabelLimit:            globLabelLimit,
         LabelNameLengthLimit:  globLabelNameLengthLimit,
         LabelValueLengthLimit: globLabelValueLengthLimit,
+        ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,

         MetricsPath: DefaultScrapeConfig.MetricsPath,
         Scheme:      DefaultScrapeConfig.Scheme,

@@ -545,6 +552,7 @@ var expectedConf = &Config{
         LabelLimit:            globLabelLimit,
         LabelNameLengthLimit:  globLabelNameLengthLimit,
         LabelValueLengthLimit: globLabelValueLengthLimit,
+        ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,

         MetricsPath: DefaultScrapeConfig.MetricsPath,
         Scheme:      DefaultScrapeConfig.Scheme,

@@ -571,6 +579,7 @@ var expectedConf = &Config{
         LabelLimit:            globLabelLimit,
         LabelNameLengthLimit:  globLabelNameLengthLimit,
         LabelValueLengthLimit: globLabelValueLengthLimit,
+        ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,

         MetricsPath: DefaultScrapeConfig.MetricsPath,
         Scheme:      DefaultScrapeConfig.Scheme,

@@ -606,6 +615,7 @@ var expectedConf = &Config{
         LabelLimit:            globLabelLimit,
         LabelNameLengthLimit:  globLabelNameLengthLimit,
         LabelValueLengthLimit: globLabelValueLengthLimit,
+        ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,

         MetricsPath: DefaultScrapeConfig.MetricsPath,
         Scheme:      DefaultScrapeConfig.Scheme,

@@ -638,6 +648,7 @@ var expectedConf = &Config{
         LabelLimit:            globLabelLimit,
         LabelNameLengthLimit:  globLabelNameLengthLimit,
         LabelValueLengthLimit: globLabelValueLengthLimit,
+        ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,

         MetricsPath: DefaultScrapeConfig.MetricsPath,
         Scheme:      DefaultScrapeConfig.Scheme,

@@ -677,6 +688,7 @@ var expectedConf = &Config{
         LabelLimit:            globLabelLimit,
         LabelNameLengthLimit:  globLabelNameLengthLimit,
         LabelValueLengthLimit: globLabelValueLengthLimit,
+        ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,

         MetricsPath: DefaultScrapeConfig.MetricsPath,
         Scheme:      DefaultScrapeConfig.Scheme,

@@ -706,6 +718,7 @@ var expectedConf = &Config{
         LabelLimit:            globLabelLimit,
         LabelNameLengthLimit:  globLabelNameLengthLimit,
         LabelValueLengthLimit: globLabelValueLengthLimit,
+        ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,

         MetricsPath: DefaultScrapeConfig.MetricsPath,
         Scheme:      DefaultScrapeConfig.Scheme,

@@ -738,6 +751,7 @@ var expectedConf = &Config{
         LabelLimit:            globLabelLimit,
         LabelNameLengthLimit:  globLabelNameLengthLimit,
         LabelValueLengthLimit: globLabelValueLengthLimit,
+        ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,

         MetricsPath: DefaultScrapeConfig.MetricsPath,
         Scheme:      DefaultScrapeConfig.Scheme,

@@ -763,6 +777,7 @@ var expectedConf = &Config{
         LabelLimit:            globLabelLimit,
         LabelNameLengthLimit:  globLabelNameLengthLimit,
         LabelValueLengthLimit: globLabelValueLengthLimit,
+        ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,

         MetricsPath: DefaultScrapeConfig.MetricsPath,
         Scheme:      DefaultScrapeConfig.Scheme,

@@ -791,6 +806,7 @@ var expectedConf = &Config{
         LabelLimit:            globLabelLimit,
         LabelNameLengthLimit:  globLabelNameLengthLimit,
         LabelValueLengthLimit: globLabelValueLengthLimit,
+        ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,

         MetricsPath: "/federate",
         Scheme:      DefaultScrapeConfig.Scheme,

@@ -819,6 +835,7 @@ var expectedConf = &Config{
         LabelLimit:            globLabelLimit,
         LabelNameLengthLimit:  globLabelNameLengthLimit,
         LabelValueLengthLimit: globLabelValueLengthLimit,
+        ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,

         MetricsPath: DefaultScrapeConfig.MetricsPath,
         Scheme:      DefaultScrapeConfig.Scheme,

@@ -847,6 +864,7 @@ var expectedConf = &Config{
         LabelLimit:            globLabelLimit,
         LabelNameLengthLimit:  globLabelNameLengthLimit,
         LabelValueLengthLimit: globLabelValueLengthLimit,
+        ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,

         MetricsPath: DefaultScrapeConfig.MetricsPath,
         Scheme:      DefaultScrapeConfig.Scheme,

@@ -872,6 +890,7 @@ var expectedConf = &Config{
         LabelLimit:            globLabelLimit,
         LabelNameLengthLimit:  globLabelNameLengthLimit,
         LabelValueLengthLimit: globLabelValueLengthLimit,
+        ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,

         MetricsPath: DefaultScrapeConfig.MetricsPath,
         Scheme:      DefaultScrapeConfig.Scheme,

@@ -905,6 +924,7 @@ var expectedConf = &Config{
         LabelLimit:            globLabelLimit,
         LabelNameLengthLimit:  globLabelNameLengthLimit,
         LabelValueLengthLimit: globLabelValueLengthLimit,
+        ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,

         MetricsPath: DefaultScrapeConfig.MetricsPath,
         Scheme:      DefaultScrapeConfig.Scheme,

@@ -937,6 +957,7 @@ var expectedConf = &Config{
         LabelLimit:            globLabelLimit,
         LabelNameLengthLimit:  globLabelNameLengthLimit,
         LabelValueLengthLimit: globLabelValueLengthLimit,
+        ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,

         MetricsPath: DefaultScrapeConfig.MetricsPath,
         Scheme:      DefaultScrapeConfig.Scheme,

@@ -965,6 +986,7 @@ var expectedConf = &Config{
         LabelLimit:            globLabelLimit,
         LabelNameLengthLimit:  globLabelNameLengthLimit,
         LabelValueLengthLimit: globLabelValueLengthLimit,
+        ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,

         MetricsPath: DefaultScrapeConfig.MetricsPath,
         Scheme:      DefaultScrapeConfig.Scheme,

@@ -993,6 +1015,7 @@ var expectedConf = &Config{
         LabelLimit:            globLabelLimit,
         LabelNameLengthLimit:  globLabelNameLengthLimit,
         LabelValueLengthLimit: globLabelValueLengthLimit,
+        ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,

         MetricsPath: DefaultScrapeConfig.MetricsPath,
         Scheme:      DefaultScrapeConfig.Scheme,

@@ -1025,6 +1048,7 @@ var expectedConf = &Config{
         LabelLimit:            globLabelLimit,
         LabelNameLengthLimit:  globLabelNameLengthLimit,
         LabelValueLengthLimit: globLabelValueLengthLimit,
+        ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,

         MetricsPath: DefaultScrapeConfig.MetricsPath,
         Scheme:      DefaultScrapeConfig.Scheme,

@@ -1060,6 +1084,7 @@ var expectedConf = &Config{
         LabelLimit:            globLabelLimit,
         LabelNameLengthLimit:  globLabelNameLengthLimit,
         LabelValueLengthLimit: globLabelValueLengthLimit,
+        ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,

         MetricsPath: DefaultScrapeConfig.MetricsPath,
         Scheme:      DefaultScrapeConfig.Scheme,

@@ -1114,6 +1139,7 @@ var expectedConf = &Config{
         LabelLimit:            globLabelLimit,
         LabelNameLengthLimit:  globLabelNameLengthLimit,
         LabelValueLengthLimit: globLabelValueLengthLimit,
+        ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,

         MetricsPath: DefaultScrapeConfig.MetricsPath,
         Scheme:      DefaultScrapeConfig.Scheme,

@@ -1139,6 +1165,7 @@ var expectedConf = &Config{
         LabelLimit:            globLabelLimit,
         LabelNameLengthLimit:  globLabelNameLengthLimit,
         LabelValueLengthLimit: globLabelValueLengthLimit,
+        ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,

         HTTPClientConfig: config.DefaultHTTPClientConfig,
         MetricsPath:      DefaultScrapeConfig.MetricsPath,

@@ -1175,6 +1202,7 @@ var expectedConf = &Config{
         LabelLimit:            globLabelLimit,
         LabelNameLengthLimit:  globLabelNameLengthLimit,
         LabelValueLengthLimit: globLabelValueLengthLimit,
+        ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,

         HTTPClientConfig: config.DefaultHTTPClientConfig,
         MetricsPath:      DefaultScrapeConfig.MetricsPath,

@@ -1217,6 +1245,7 @@ var expectedConf = &Config{
         LabelLimit:            globLabelLimit,
         LabelNameLengthLimit:  globLabelNameLengthLimit,
         LabelValueLengthLimit: globLabelValueLengthLimit,
+        ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,

         MetricsPath: DefaultScrapeConfig.MetricsPath,
         Scheme:      DefaultScrapeConfig.Scheme,

@@ -1250,6 +1279,7 @@ var expectedConf = &Config{
         LabelLimit:            globLabelLimit,
         LabelNameLengthLimit:  globLabelNameLengthLimit,
         LabelValueLengthLimit: globLabelValueLengthLimit,
+        ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,

         HTTPClientConfig: config.DefaultHTTPClientConfig,
         MetricsPath:      DefaultScrapeConfig.MetricsPath,

@@ -1277,6 +1307,7 @@ var expectedConf = &Config{
         LabelLimit:            globLabelLimit,
         LabelNameLengthLimit:  globLabelNameLengthLimit,
         LabelValueLengthLimit: globLabelValueLengthLimit,
+        ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,

         MetricsPath: DefaultScrapeConfig.MetricsPath,
         Scheme:      DefaultScrapeConfig.Scheme,

@@ -1307,6 +1338,7 @@ var expectedConf = &Config{
         LabelLimit:            globLabelLimit,
         LabelNameLengthLimit:  globLabelNameLengthLimit,
         LabelValueLengthLimit: globLabelValueLengthLimit,
+        ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,

         MetricsPath: DefaultScrapeConfig.MetricsPath,
         Scheme:      DefaultScrapeConfig.Scheme,
@@ -1925,6 +1957,14 @@ var expectedErrors = []struct {
         filename: "scrape_config_files_glob.bad.yml",
         errMsg:   `parsing YAML file testdata/scrape_config_files_glob.bad.yml: invalid scrape config file path "scrape_configs/*/*"`,
     },
+    {
+        filename: "scrape_config_files_scrape_protocols.bad.yml",
+        errMsg:   `parsing YAML file testdata/scrape_config_files_scrape_protocols.bad.yml: scrape_protocols: unknown scrape protocol prometheusproto, supported: [OpenMetricsText0.0.1 OpenMetricsText1.0.0 PrometheusProto PrometheusText0.0.4] for scrape config with job name "node"`,
+    },
+    {
+        filename: "scrape_config_files_scrape_protocols2.bad.yml",
+        errMsg:   `parsing YAML file testdata/scrape_config_files_scrape_protocols2.bad.yml: duplicated protocol in scrape_protocols, got [OpenMetricsText1.0.0 PrometheusProto OpenMetricsText1.0.0] for scrape config with job name "node"`,
+    },
 }

 func TestBadConfigs(t *testing.T) {
@@ -2018,6 +2058,8 @@ func TestGetScrapeConfigs(t *testing.T) {
         HonorTimestamps: true,
         ScrapeInterval:  scrapeInterval,
         ScrapeTimeout:   scrapeTimeout,
+        ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols,
+
         MetricsPath:      "/metrics",
         Scheme:           "http",
         HTTPClientConfig: config.DefaultHTTPClientConfig,

@@ -2071,6 +2113,7 @@ func TestGetScrapeConfigs(t *testing.T) {
         HonorTimestamps: true,
         ScrapeInterval:  model.Duration(60 * time.Second),
         ScrapeTimeout:   DefaultGlobalConfig.ScrapeTimeout,
+        ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols,

         MetricsPath: DefaultScrapeConfig.MetricsPath,
         Scheme:      DefaultScrapeConfig.Scheme,

@@ -2101,6 +2144,8 @@ func TestGetScrapeConfigs(t *testing.T) {
         HonorTimestamps: true,
         ScrapeInterval:  model.Duration(15 * time.Second),
         ScrapeTimeout:   DefaultGlobalConfig.ScrapeTimeout,
+        ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols,
+
         HTTPClientConfig: config.HTTPClientConfig{
             TLSConfig: config.TLSConfig{
                 CertFile: filepath.FromSlash("testdata/valid_cert_file"),
config/testdata/conf.good.yml (2 changed lines)
@@ -114,6 +114,7 @@ scrape_configs:

   scrape_interval: 50s
   scrape_timeout: 5s
+  scrape_protocols: ["PrometheusText0.0.4"]

   body_size_limit: 10MB
   sample_limit: 1000

@@ -122,7 +123,6 @@ scrape_configs:
   label_name_length_limit: 210
   label_value_length_limit: 210

-
   metrics_path: /my_path
   scheme: https
config/testdata/scrape_config_files_scrape_protocols.bad.yml (new file, 5 lines)
@@ -0,0 +1,5 @@
+scrape_configs:
+- job_name: node
+  scrape_protocols: ["prometheusproto"]
+  static_configs:
+  - targets: ['localhost:8080']
config/testdata/scrape_config_files_scrape_protocols2.bad.yml (new file, 5 lines)
@@ -0,0 +1,5 @@
+scrape_configs:
+- job_name: node
+  scrape_protocols: ["OpenMetricsText1.0.0", "PrometheusProto", "OpenMetricsText1.0.0"]
+  static_configs:
+  - targets: ['localhost:8080']
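What the two fixtures above exercise, sketched against this commit's config package (the duplicate check in validateAcceptScrapeProtocols compares names case-insensitively; exact error wrapping may differ):

package main

import (
	"fmt"

	"github.com/go-kit/log"
	"github.com/prometheus/prometheus/config"
)

func main() {
	_, err := config.Load(`
scrape_configs:
  - job_name: node
    scrape_protocols: ["OpenMetricsText1.0.0", "PrometheusProto", "OpenMetricsText1.0.0"]
`, false, log.NewNopLogger())
	// Error text includes: duplicated protocol in scrape_protocols, got
	// [OpenMetricsText1.0.0 PrometheusProto OpenMetricsText1.0.0]
	// for scrape config with job name "node".
	fmt.Println(err)
}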
docs/configuration/configuration.md

@@ -61,6 +61,13 @@ global:
   # How long until a scrape request times out.
   [ scrape_timeout: <duration> | default = 10s ]

+  # The protocols to negotiate during a scrape with the client.
+  # Supported values (case sensitive): PrometheusProto, OpenMetricsText0.0.1,
+  # OpenMetricsText1.0.0, PrometheusText0.0.4.
+  # The default value changes to [ PrometheusProto, OpenMetricsText1.0.0, OpenMetricsText0.0.1, PrometheusText0.0.4 ]
+  # when the native-histograms feature flag is set.
+  [ scrape_protocols: [<string>, ...] | default = [ OpenMetricsText1.0.0, OpenMetricsText0.0.1, PrometheusText0.0.4 ] ]
+
   # How frequently to evaluate rules.
   [ evaluation_interval: <duration> | default = 1m ]
@@ -171,6 +178,11 @@ job_name: <job_name>
 # Per-scrape timeout when scraping this job.
 [ scrape_timeout: <duration> | default = <global_config.scrape_timeout> ]

+# The protocols to negotiate during a scrape with the client.
+# Supported values (case sensitive): PrometheusProto, OpenMetricsText0.0.1,
+# OpenMetricsText1.0.0, PrometheusText0.0.4.
+[ scrape_protocols: [<string>, ...] | default = <global_config.scrape_protocols> ]
+
 # Whether to scrape a classic histogram that is also exposed as a native
 # histogram (has no effect without --enable-feature=native-histograms).
 [ scrape_classic_histograms: <boolean> | default = false ]
scrape/manager.go

@@ -132,9 +132,6 @@ type Options struct {
     // Option to enable the experimental in-memory metadata storage and append
     // metadata to the WAL.
     EnableMetadataStorage bool
-    // Option to enable protobuf negotiation with the client. Note that the client can already
-    // send protobuf without needing to enable this.
-    EnableProtobufNegotiation bool
     // Option to increase the interval used by scrape manager to throttle target groups updates.
     DiscoveryReloadInterval model.Duration
scrape/scrape.go

@@ -24,6 +24,7 @@ import (
     "net/http"
     "reflect"
     "strconv"
+    "strings"
     "sync"
     "time"
@@ -250,8 +251,6 @@ type scrapePool struct {
     newLoop func(scrapeLoopOptions) loop

     noDefaultPort bool
-
-    enableProtobufNegotiation bool
 }

 type labelLimits struct {
@@ -305,7 +304,6 @@ func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, offsetSeed
         logger:        logger,
         httpOpts:      options.HTTPClientOptions,
         noDefaultPort: options.NoDefaultPort,
-        enableProtobufNegotiation: options.EnableProtobufNegotiation,
     }
     sp.newLoop = func(opts scrapeLoopOptions) loop {
         // Update the targets retrieval function for metadata to a new scrape cache.
@@ -456,12 +454,14 @@ func (sp *scrapePool) reload(cfg *config.ScrapeConfig) error {

         t := sp.activeTargets[fp]
         interval, timeout, err := t.intervalAndTimeout(interval, timeout)
-        acceptHeader := scrapeAcceptHeader
-        if sp.enableProtobufNegotiation {
-            acceptHeader = scrapeAcceptHeaderWithProtobuf
-        }
         var (
-            s = &targetScraper{Target: t, client: sp.client, timeout: timeout, bodySizeLimit: bodySizeLimit, acceptHeader: acceptHeader}
+            s = &targetScraper{
+                Target:        t,
+                client:        sp.client,
+                timeout:       timeout,
+                bodySizeLimit: bodySizeLimit,
+                acceptHeader:  acceptHeader(cfg.ScrapeProtocols),
+            }
             newLoop = sp.newLoop(scrapeLoopOptions{
                 target:  t,
                 scraper: s,
@@ -577,11 +577,13 @@ func (sp *scrapePool) sync(targets []*Target) {
             // for every target.
             var err error
             interval, timeout, err = t.intervalAndTimeout(interval, timeout)
-            acceptHeader := scrapeAcceptHeader
-            if sp.enableProtobufNegotiation {
-                acceptHeader = scrapeAcceptHeaderWithProtobuf
-            }
-            s := &targetScraper{Target: t, client: sp.client, timeout: timeout, bodySizeLimit: bodySizeLimit, acceptHeader: acceptHeader}
+            s := &targetScraper{
+                Target:        t,
+                client:        sp.client,
+                timeout:       timeout,
+                bodySizeLimit: bodySizeLimit,
+                acceptHeader:  acceptHeader(sp.config.ScrapeProtocols),
+            }
             l := sp.newLoop(scrapeLoopOptions{
                 target:  t,
                 scraper: s,
@@ -808,10 +810,20 @@ type targetScraper struct {

 var errBodySizeLimit = errors.New("body size limit exceeded")

-const (
-    scrapeAcceptHeader             = `application/openmetrics-text;version=1.0.0,application/openmetrics-text;version=0.0.1;q=0.75,text/plain;version=0.0.4;q=0.5,*/*;q=0.1`
-    scrapeAcceptHeaderWithProtobuf = `application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited,application/openmetrics-text;version=1.0.0;q=0.8,application/openmetrics-text;version=0.0.1;q=0.75,text/plain;version=0.0.4;q=0.5,*/*;q=0.1`
-)
+// acceptHeader transforms preference from the options into specific header values as
+// https://www.rfc-editor.org/rfc/rfc9110.html#name-accept defines.
+// No validation is done here; we expect scrape protocols to be validated already.
+func acceptHeader(sps []config.ScrapeProtocol) string {
+    var vals []string
+    weight := len(config.ScrapeProtocolsHeaders) + 1
+    for _, sp := range sps {
+        vals = append(vals, fmt.Sprintf("%s;q=0.%d", config.ScrapeProtocolsHeaders[sp], weight))
+        weight--
+    }
+    // Default match anything.
+    vals = append(vals, fmt.Sprintf("*/*;q=0.%d", weight))
+    return strings.Join(vals, ",")
+}

 var UserAgent = fmt.Sprintf("Prometheus/%s", version.Version)
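To see what negotiation now puts on the wire, here is a standalone copy of the unexported acceptHeader logic above (buildAccept is a made-up name for illustration):

package main

import (
	"fmt"
	"strings"

	"github.com/prometheus/prometheus/config"
)

// buildAccept mirrors acceptHeader: earlier entries in the preference list
// get a higher q-value, and a catch-all */* comes last with the lowest one.
func buildAccept(sps []config.ScrapeProtocol) string {
	var vals []string
	weight := len(config.ScrapeProtocolsHeaders) + 1 // 5 with four known protocols
	for _, sp := range sps {
		vals = append(vals, fmt.Sprintf("%s;q=0.%d", config.ScrapeProtocolsHeaders[sp], weight))
		weight--
	}
	vals = append(vals, fmt.Sprintf("*/*;q=0.%d", weight))
	return strings.Join(vals, ",")
}

func main() {
	fmt.Println(buildAccept(config.DefaultScrapeProtocols))
	// application/openmetrics-text;version=1.0.0;q=0.5,application/openmetrics-text;version=0.0.1;q=0.4,text/plain;version=0.0.4;q=0.3,*/*;q=0.2

	fmt.Println(buildAccept(config.DefaultNativeHistogramScrapeProtocols))
	// application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited;q=0.5,application/openmetrics-text;version=1.0.0;q=0.4,application/openmetrics-text;version=0.0.1;q=0.3,text/plain;version=0.0.4;q=0.2,*/*;q=0.1
}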
scrape/scrape_test.go

@@ -2627,9 +2627,9 @@ func TestTargetScraperScrapeOK(t *testing.T) {
         require.Equal(t, "metric_a 1\nmetric_b 2\n", buf.String())
     }

-    runTest(scrapeAcceptHeader)
+    runTest(acceptHeader(config.DefaultScrapeProtocols))
     protobufParsing = true
-    runTest(scrapeAcceptHeaderWithProtobuf)
+    runTest(acceptHeader(config.DefaultNativeHistogramScrapeProtocols))
 }

 func TestTargetScrapeScrapeCancel(t *testing.T) {

@@ -2655,7 +2655,7 @@ func TestTargetScrapeScrapeCancel(t *testing.T) {
             ),
         },
         client:       http.DefaultClient,
-        acceptHeader: scrapeAcceptHeader,
+        acceptHeader: acceptHeader(config.DefaultGlobalConfig.ScrapeProtocols),
     }
     ctx, cancel := context.WithCancel(context.Background())

@@ -2710,7 +2710,7 @@ func TestTargetScrapeScrapeNotFound(t *testing.T) {
             ),
         },
         client:       http.DefaultClient,
-        acceptHeader: scrapeAcceptHeader,
+        acceptHeader: acceptHeader(config.DefaultGlobalConfig.ScrapeProtocols),
     }

     resp, err := ts.scrape(context.Background())

@@ -2754,7 +2754,7 @@ func TestTargetScraperBodySizeLimit(t *testing.T) {
         },
         client:        http.DefaultClient,
         bodySizeLimit: bodySizeLimit,
-        acceptHeader:  scrapeAcceptHeader,
+        acceptHeader:  acceptHeader(config.DefaultGlobalConfig.ScrapeProtocols),
     }
     var buf bytes.Buffer