config: extract SD and HTTPClient configurations

Fabian Reinartz 2016-11-23 12:41:19 +01:00
parent dd1a656cc4
commit 200bbe1bad
9 changed files with 240 additions and 164 deletions

View file

@@ -102,15 +102,15 @@ func checkConfig(t cli.Term, filename string) ([]string, error) {
 	}
 	for _, scfg := range cfg.ScrapeConfigs {
-		if err := checkFileExists(scfg.BearerTokenFile); err != nil {
-			return nil, fmt.Errorf("error checking bearer token file %q: %s", scfg.BearerTokenFile, err)
+		if err := checkFileExists(scfg.HTTPClientConfig.BearerTokenFile); err != nil {
+			return nil, fmt.Errorf("error checking bearer token file %q: %s", scfg.HTTPClientConfig.BearerTokenFile, err)
 		}
-		if err := checkTLSConfig(scfg.TLSConfig); err != nil {
+		if err := checkTLSConfig(scfg.HTTPClientConfig.TLSConfig); err != nil {
 			return nil, err
 		}
-		for _, kd := range scfg.KubernetesSDConfigs {
+		for _, kd := range scfg.ServiceDiscoveryConfig.KubernetesSDConfigs {
 			if err := checkTLSConfig(kd.TLSConfig); err != nil {
 				return nil, err
 			}

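The promtool change above only redirects existing checks at the new nested fields. For reference, the check it feeds is essentially a stat of the referenced credentials file; a minimal standalone sketch (hypothetical helper name, not promtool's actual code):

package main

import (
	"fmt"
	"os"
)

// checkFileExists is a stand-in for the promtool helper referenced above:
// an empty path is allowed (the field is optional), otherwise the file must exist.
func checkFileExists(fn string) error {
	if fn == "" {
		return nil
	}
	_, err := os.Stat(fn)
	return err
}

func main() {
	// After this commit the path would come from scfg.HTTPClientConfig.BearerTokenFile.
	if err := checkFileExists("/etc/prometheus/token"); err != nil {
		fmt.Printf("error checking bearer token file: %s\n", err)
	}
}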
View file

@@ -87,6 +87,11 @@ var (
 		HonorLabels: false,
 	}
+
+	// DefaultAlertmanagersConfig is the default alertmanager configuration.
+	DefaultAlertmanagersConfig = AlertmanagersConfig{
+		Scheme: "http",
+	}
 	// DefaultRelabelConfig is the default Relabel configuration.
 	DefaultRelabelConfig = RelabelConfig{
 		Action: RelabelReplace,
@@ -214,20 +219,22 @@ func resolveFilepaths(baseDir string, cfg *Config) {
 		cfg.RuleFiles[i] = join(rf)
 	}
-	for _, scfg := range cfg.ScrapeConfigs {
+	for _, cfg := range cfg.ScrapeConfigs {
+		scfg := &cfg.HTTPClientConfig
+
 		scfg.BearerTokenFile = join(scfg.BearerTokenFile)
 		scfg.TLSConfig.CAFile = join(scfg.TLSConfig.CAFile)
 		scfg.TLSConfig.CertFile = join(scfg.TLSConfig.CertFile)
 		scfg.TLSConfig.KeyFile = join(scfg.TLSConfig.KeyFile)
-		for _, kcfg := range scfg.KubernetesSDConfigs {
+		for _, kcfg := range cfg.ServiceDiscoveryConfig.KubernetesSDConfigs {
 			kcfg.BearerTokenFile = join(kcfg.BearerTokenFile)
 			kcfg.TLSConfig.CAFile = join(kcfg.TLSConfig.CAFile)
 			kcfg.TLSConfig.CertFile = join(kcfg.TLSConfig.CertFile)
 			kcfg.TLSConfig.KeyFile = join(kcfg.TLSConfig.KeyFile)
 		}
-		for _, mcfg := range scfg.MarathonSDConfigs {
+		for _, mcfg := range cfg.ServiceDiscoveryConfig.MarathonSDConfigs {
 			mcfg.TLSConfig.CAFile = join(mcfg.TLSConfig.CAFile)
 			mcfg.TLSConfig.CertFile = join(mcfg.TLSConfig.CertFile)
 			mcfg.TLSConfig.KeyFile = join(mcfg.TLSConfig.KeyFile)
@@ -312,11 +319,6 @@ func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error {
 	return nil
 }
-
-// AlertingConfig configures alerting and alertmanager related configs
-type AlertingConfig struct {
-	AlertRelabelConfigs []*RelabelConfig `yaml:"alert_relabel_configs,omitempty"`
-}
 // GlobalConfig configures values that are used across other configuration
 // objects.
 type GlobalConfig struct {
@@ -404,33 +406,8 @@ func (c *TLSConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
 	return nil
 }
-// ScrapeConfig configures a scraping unit for Prometheus.
-type ScrapeConfig struct {
-	// The job name to which the job label is set by default.
-	JobName string `yaml:"job_name"`
-	// Indicator whether the scraped metrics should remain unmodified.
-	HonorLabels bool `yaml:"honor_labels,omitempty"`
-	// A set of query parameters with which the target is scraped.
-	Params url.Values `yaml:"params,omitempty"`
-	// How frequently to scrape the targets of this scrape config.
-	ScrapeInterval model.Duration `yaml:"scrape_interval,omitempty"`
-	// The timeout for scraping targets of this config.
-	ScrapeTimeout model.Duration `yaml:"scrape_timeout,omitempty"`
-	// The HTTP resource path on which to fetch metrics from targets.
-	MetricsPath string `yaml:"metrics_path,omitempty"`
-	// The URL scheme with which to fetch metrics from targets.
-	Scheme string `yaml:"scheme,omitempty"`
-	// The HTTP basic authentication credentials for the targets.
-	BasicAuth *BasicAuth `yaml:"basic_auth,omitempty"`
-	// The bearer token for the targets.
-	BearerToken string `yaml:"bearer_token,omitempty"`
-	// The bearer token file for the targets.
-	BearerTokenFile string `yaml:"bearer_token_file,omitempty"`
-	// HTTP proxy server to use to connect to the targets.
-	ProxyURL URL `yaml:"proxy_url,omitempty"`
-	// TLSConfig to use to connect to the targets.
-	TLSConfig TLSConfig `yaml:"tls_config,omitempty"`
+// ServiceDiscoveryConfig configures lists of different service discovery mechanisms.
+type ServiceDiscoveryConfig struct {
 	// List of labeled target groups for this job.
 	StaticConfigs []*TargetGroup `yaml:"static_configs,omitempty"`
 	// List of DNS service discovery configurations.
@@ -454,6 +431,62 @@ type ScrapeConfig struct {
 	// List of Azure service discovery configurations.
 	AzureSDConfigs []*AzureSDConfig `yaml:"azure_sd_configs,omitempty"`
+	// Catches all undefined fields and must be empty after parsing.
+	XXX map[string]interface{} `yaml:",inline"`
+}
+
+// UnmarshalYAML implements the yaml.Unmarshaler interface.
+func (c *ServiceDiscoveryConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
+	type plain ServiceDiscoveryConfig
+	if err := unmarshal((*plain)(c)); err != nil {
+		return err
+	}
+	if err := checkOverflow(c.XXX, "TLS config"); err != nil {
+		return err
+	}
+	return nil
+}
+
+// HTTPClientConfig configures an HTTP client.
+type HTTPClientConfig struct {
+	// The HTTP basic authentication credentials for the targets.
+	BasicAuth *BasicAuth `yaml:"basic_auth,omitempty"`
+	// The bearer token for the targets.
+	BearerToken string `yaml:"bearer_token,omitempty"`
+	// The bearer token file for the targets.
+	BearerTokenFile string `yaml:"bearer_token_file,omitempty"`
+	// HTTP proxy server to use to connect to the targets.
+	ProxyURL URL `yaml:"proxy_url,omitempty"`
+	// TLSConfig to use to connect to the targets.
+	TLSConfig TLSConfig `yaml:"tls_config,omitempty"`
+	// Catches all undefined fields and must be empty after parsing.
+	XXX map[string]interface{} `yaml:",inline"`
+}
+
+// ScrapeConfig configures a scraping unit for Prometheus.
+type ScrapeConfig struct {
+	// The job name to which the job label is set by default.
+	JobName string `yaml:"job_name"`
+	// Indicator whether the scraped metrics should remain unmodified.
+	HonorLabels bool `yaml:"honor_labels,omitempty"`
+	// A set of query parameters with which the target is scraped.
+	Params url.Values `yaml:"params,omitempty"`
+	// How frequently to scrape the targets of this scrape config.
+	ScrapeInterval model.Duration `yaml:"scrape_interval,omitempty"`
+	// The timeout for scraping targets of this config.
+	ScrapeTimeout model.Duration `yaml:"scrape_timeout,omitempty"`
+	// The HTTP resource path on which to fetch metrics from targets.
+	MetricsPath string `yaml:"metrics_path,omitempty"`
+	// The URL scheme with which to fetch metrics from targets.
+	Scheme string `yaml:"scheme,omitempty"`
+
+	// We cannot do proper Go type embedding below as the parser will then parse
+	// values arbitrarily into the overflow maps of further-down types.
+	ServiceDiscoveryConfig ServiceDiscoveryConfig `yaml:",inline"`
+	HTTPClientConfig       HTTPClientConfig       `yaml:",inline"`
+
 	// List of target relabel configurations.
 	RelabelConfigs []*RelabelConfig `yaml:"relabel_configs,omitempty"`
 	// List of metric relabel configurations.
@@ -477,15 +510,19 @@ func (c *ScrapeConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
 	if len(c.JobName) == 0 {
 		return fmt.Errorf("job_name is empty")
 	}
-	if len(c.BearerToken) > 0 && len(c.BearerTokenFile) > 0 {
+
+	// The UnmarshalYAML method of HTTPClientConfig is not being called because it's not a pointer.
+	// We cannot make it a pointer as the parser panics for inlined pointer structs.
+	// Thus we just do its validation here.
+	if len(c.HTTPClientConfig.BearerToken) > 0 && len(c.HTTPClientConfig.BearerTokenFile) > 0 {
 		return fmt.Errorf("at most one of bearer_token & bearer_token_file must be configured")
 	}
-	if c.BasicAuth != nil && (len(c.BearerToken) > 0 || len(c.BearerTokenFile) > 0) {
+	if c.HTTPClientConfig.BasicAuth != nil && (len(c.HTTPClientConfig.BearerToken) > 0 || len(c.HTTPClientConfig.BearerTokenFile) > 0) {
 		return fmt.Errorf("at most one of basic_auth, bearer_token & bearer_token_file must be configured")
 	}
 	// Check for users putting URLs in target groups.
 	if len(c.RelabelConfigs) == 0 {
-		for _, tg := range c.StaticConfigs {
+		for _, tg := range c.ServiceDiscoveryConfig.StaticConfigs {
 			for _, t := range tg.Targets {
 				if err = CheckTargetAddress(t[model.AddressLabel]); err != nil {
 					return err
@@ -496,6 +533,29 @@ func (c *ScrapeConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
 	return nil
 }
+
+// AlertingConfig configures alerting and alertmanager related configs
+type AlertingConfig struct {
+	AlertRelabelConfigs []*RelabelConfig `yaml:"alert_relabel_configs,omitempty"`
+	// Catches all undefined fields and must be empty after parsing.
+	XXX map[string]interface{} `yaml:",inline"`
+}
+
+// UnmarshalYAML implements the yaml.Unmarshaler interface.
+func (c *AlertingConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
+	// Create a clean global config as the previous one was already populated
+	// by the default due to the YAML parser behavior for empty blocks.
+	*c = AlertingConfig{}
+	type plain AlertingConfig
+	if err := unmarshal((*plain)(c)); err != nil {
+		return err
+	}
+	if err := checkOverflow(c.XXX, "alerting config"); err != nil {
+		return err
+	}
+	return nil
+}
+
 // CheckTargetAddress checks if target address is valid.
 func CheckTargetAddress(address model.LabelValue) error {
 	// For now check for a URL, we may want to expand this later.

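The yaml:",inline" tags are what keep the on-disk format unchanged: keys such as bearer_token_file and static_configs still sit directly under a scrape_config block, while the Go side now groups them into HTTPClientConfig and ServiceDiscoveryConfig. A standalone sketch with simplified stand-in types (not the real config package), assuming gopkg.in/yaml.v2 inline semantics:

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

// Cut-down stand-ins for the two structs extracted by this commit.
type httpClientConfig struct {
	BearerTokenFile string `yaml:"bearer_token_file,omitempty"`
}

type serviceDiscoveryConfig struct {
	StaticConfigs []map[string][]string `yaml:"static_configs,omitempty"`
}

type scrapeConfig struct {
	JobName string `yaml:"job_name"`
	// Plain named fields instead of Go embedding, as in the diff above, so that
	// unknown keys do not spill into the wrong overflow map.
	ServiceDiscoveryConfig serviceDiscoveryConfig `yaml:",inline"`
	HTTPClientConfig       httpClientConfig       `yaml:",inline"`
}

func main() {
	data := `
job_name: example
bearer_token_file: /tmp/token
static_configs:
- targets: ["localhost:9090"]
`
	var c scrapeConfig
	if err := yaml.Unmarshal([]byte(data), &c); err != nil {
		panic(err)
	}
	// All keys came from one flat YAML block but land in the grouped structs.
	fmt.Println(c.JobName, c.HTTPClientConfig.BearerTokenFile, c.ServiceDiscoveryConfig.StaticConfigs)
}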
View file

@@ -68,8 +68,11 @@ var expectedConf = &Config{
 			MetricsPath: DefaultScrapeConfig.MetricsPath,
 			Scheme:      DefaultScrapeConfig.Scheme,
+			HTTPClientConfig: HTTPClientConfig{
 				BearerTokenFile: "testdata/valid_token_file",
+			},
+			ServiceDiscoveryConfig: ServiceDiscoveryConfig{
 				StaticConfigs: []*TargetGroup{
 					{
 						Targets: []model.LabelSet{
@@ -93,6 +96,7 @@ var expectedConf = &Config{
 						RefreshInterval: model.Duration(5 * time.Minute),
 					},
 				},
+			},
 			RelabelConfigs: []*RelabelConfig{
 				{
@@ -130,13 +134,16 @@ var expectedConf = &Config{
 			ScrapeInterval: model.Duration(50 * time.Second),
 			ScrapeTimeout:  model.Duration(5 * time.Second),
+			HTTPClientConfig: HTTPClientConfig{
 				BasicAuth: &BasicAuth{
 					Username: "admin_name",
 					Password: "admin_password",
 				},
+			},
 			MetricsPath: "/my_path",
 			Scheme:      "https",
+			ServiceDiscoveryConfig: ServiceDiscoveryConfig{
 				DNSSDConfigs: []*DNSSDConfig{
 					{
 						Names: []string{
@@ -154,6 +161,7 @@ var expectedConf = &Config{
 						Type: "SRV",
 					},
 				},
+			},
 			RelabelConfigs: []*RelabelConfig{
 				{
@@ -205,6 +213,7 @@ var expectedConf = &Config{
 			MetricsPath: DefaultScrapeConfig.MetricsPath,
 			Scheme:      DefaultScrapeConfig.Scheme,
+			ServiceDiscoveryConfig: ServiceDiscoveryConfig{
 				ConsulSDConfigs: []*ConsulSDConfig{
 					{
 						Server: "localhost:1234",
@@ -213,6 +222,7 @@ var expectedConf = &Config{
 						Scheme: DefaultConsulSDConfig.Scheme,
 					},
 				},
+			},
 			RelabelConfigs: []*RelabelConfig{
 				{
@@ -234,6 +244,7 @@ var expectedConf = &Config{
 			MetricsPath: "/metrics",
 			Scheme:      "http",
+			HTTPClientConfig: HTTPClientConfig{
 				TLSConfig: TLSConfig{
 					CertFile: "testdata/valid_cert_file",
 					KeyFile:  "testdata/valid_key_file",
@@ -241,6 +252,7 @@ var expectedConf = &Config{
 				BearerToken: "avalidtoken",
 			},
+			},
 		{
 			JobName: "service-kubernetes",
@@ -250,6 +262,7 @@ var expectedConf = &Config{
 			MetricsPath: DefaultScrapeConfig.MetricsPath,
 			Scheme:      DefaultScrapeConfig.Scheme,
+			ServiceDiscoveryConfig: ServiceDiscoveryConfig{
 				KubernetesSDConfigs: []*KubernetesSDConfig{
 					{
 						APIServer: kubernetesSDHostURL(),
@@ -261,6 +274,7 @@ var expectedConf = &Config{
 						},
 					},
 				},
+			},
 		{
 			JobName: "service-marathon",
@@ -270,6 +284,7 @@ var expectedConf = &Config{
 			MetricsPath: DefaultScrapeConfig.MetricsPath,
 			Scheme:      DefaultScrapeConfig.Scheme,
+			ServiceDiscoveryConfig: ServiceDiscoveryConfig{
 				MarathonSDConfigs: []*MarathonSDConfig{
 					{
 						Servers: []string{
@@ -284,6 +299,7 @@ var expectedConf = &Config{
 						},
 					},
 				},
+			},
 		{
 			JobName: "service-ec2",
@@ -293,6 +309,7 @@ var expectedConf = &Config{
 			MetricsPath: DefaultScrapeConfig.MetricsPath,
 			Scheme:      DefaultScrapeConfig.Scheme,
+			ServiceDiscoveryConfig: ServiceDiscoveryConfig{
 				EC2SDConfigs: []*EC2SDConfig{
 					{
 						Region: "us-east-1",
@@ -304,6 +321,7 @@ var expectedConf = &Config{
 						},
 					},
 				},
+			},
 		{
 			JobName: "service-azure",
@@ -313,6 +331,7 @@ var expectedConf = &Config{
 			MetricsPath: DefaultScrapeConfig.MetricsPath,
 			Scheme:      DefaultScrapeConfig.Scheme,
+			ServiceDiscoveryConfig: ServiceDiscoveryConfig{
 				AzureSDConfigs: []*AzureSDConfig{
 					{
 						SubscriptionID: "11AAAA11-A11A-111A-A111-1111A1111A11",
@@ -324,6 +343,7 @@ var expectedConf = &Config{
 						},
 					},
 				},
+			},
 		{
 			JobName: "service-nerve",
@@ -333,6 +353,7 @@ var expectedConf = &Config{
 			MetricsPath: DefaultScrapeConfig.MetricsPath,
 			Scheme:      DefaultScrapeConfig.Scheme,
+			ServiceDiscoveryConfig: ServiceDiscoveryConfig{
 				NerveSDConfigs: []*NerveSDConfig{
 					{
 						Servers: []string{"localhost"},
@@ -341,6 +362,7 @@ var expectedConf = &Config{
 						},
 					},
 				},
+			},
 		{
 			JobName: "0123service-xxx",
@@ -350,6 +372,7 @@ var expectedConf = &Config{
 			MetricsPath: DefaultScrapeConfig.MetricsPath,
 			Scheme:      DefaultScrapeConfig.Scheme,
+			ServiceDiscoveryConfig: ServiceDiscoveryConfig{
 				StaticConfigs: []*TargetGroup{
 					{
 						Targets: []model.LabelSet{
@@ -358,6 +381,7 @@ var expectedConf = &Config{
 						},
 					},
 				},
+			},
 		{
 			JobName: "測試",
@@ -367,6 +391,7 @@ var expectedConf = &Config{
 			MetricsPath: DefaultScrapeConfig.MetricsPath,
 			Scheme:      DefaultScrapeConfig.Scheme,
+			ServiceDiscoveryConfig: ServiceDiscoveryConfig{
 				StaticConfigs: []*TargetGroup{
 					{
 						Targets: []model.LabelSet{
@@ -376,6 +401,7 @@ var expectedConf = &Config{
 						},
 					},
 				},
+			},
 	original: "",
 }

View file

@@ -49,7 +49,7 @@ type TargetProvider interface {
 }
 // ProvidersFromConfig returns all TargetProviders configured in cfg.
-func ProvidersFromConfig(cfg *config.ScrapeConfig) map[string]TargetProvider {
+func ProvidersFromConfig(cfg config.ServiceDiscoveryConfig) map[string]TargetProvider {
 	providers := map[string]TargetProvider{}
 	app := func(mech string, i int, tp TargetProvider) {

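With the parameter narrowed to config.ServiceDiscoveryConfig, the discovery layer no longer sees scrape-only settings. A rough stand-in sketch of the provider map the function builds, using simplified types and static configs only (the real function also wires DNS, Consul, Kubernetes, Marathon, EC2, Azure, and Nerve providers):

package main

import "fmt"

// Simplified stand-ins for config.TargetGroup, config.ServiceDiscoveryConfig
// and the TargetProvider interface.
type targetGroup struct{ Targets []string }

type serviceDiscoveryConfig struct {
	StaticConfigs []*targetGroup
}

type targetProvider interface {
	Sources() []string
}

type staticProvider struct{ group *targetGroup }

func (p staticProvider) Sources() []string { return []string{"static"} }

// providersFromConfig mirrors the shape of ProvidersFromConfig after this commit:
// it receives only the service discovery part of a scrape configuration.
func providersFromConfig(cfg serviceDiscoveryConfig) map[string]targetProvider {
	providers := map[string]targetProvider{}
	for i, tg := range cfg.StaticConfigs {
		providers[fmt.Sprintf("static/%d", i)] = staticProvider{group: tg}
	}
	return providers
}

func main() {
	cfg := serviceDiscoveryConfig{
		StaticConfigs: []*targetGroup{{Targets: []string{"foo:9090"}}},
	}
	for name := range providersFromConfig(cfg) {
		fmt.Println(name) // static/0
	}
}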
View file

@@ -33,15 +33,14 @@ func TestTargetSetRecreatesTargetGroupsEveryRun(t *testing.T) {
 		}
 	}
-	scrapeConfig := &config.ScrapeConfig{}
+	cfg := &config.ServiceDiscoveryConfig{}
 	sOne := `
-job_name: "foo"
 static_configs:
 - targets: ["foo:9090"]
 - targets: ["bar:9090"]
 `
-	if err := yaml.Unmarshal([]byte(sOne), scrapeConfig); err != nil {
+	if err := yaml.Unmarshal([]byte(sOne), cfg); err != nil {
 		t.Fatalf("Unable to load YAML config sOne: %s", err)
 	}
 	called := make(chan struct{})
@@ -54,22 +53,21 @@ static_configs:
 	go ts.Run(ctx)
-	ts.UpdateProviders(ProvidersFromConfig(scrapeConfig))
+	ts.UpdateProviders(ProvidersFromConfig(*cfg))
 	<-called
 	verifyPresence(ts.tgroups, "static/0/0", true)
 	verifyPresence(ts.tgroups, "static/0/1", true)
 	sTwo := `
-job_name: "foo"
 static_configs:
 - targets: ["foo:9090"]
 `
-	if err := yaml.Unmarshal([]byte(sTwo), scrapeConfig); err != nil {
+	if err := yaml.Unmarshal([]byte(sTwo), cfg); err != nil {
 		t.Fatalf("Unable to load YAML config sTwo: %s", err)
 	}
-	ts.UpdateProviders(ProvidersFromConfig(scrapeConfig))
+	ts.UpdateProviders(ProvidersFromConfig(*cfg))
 	<-called
 	verifyPresence(ts.tgroups, "static/0/0", true)

View file

@@ -106,7 +106,7 @@ type scrapePool struct {
 }
 func newScrapePool(ctx context.Context, cfg *config.ScrapeConfig, app storage.SampleAppender) *scrapePool {
-	client, err := NewHTTPClient(cfg)
+	client, err := NewHTTPClient(cfg.HTTPClientConfig)
 	if err != nil {
 		// Any errors that could occur here should be caught during config validation.
 		log.Errorf("Error creating HTTP client for job %q: %s", cfg.JobName, err)
@@ -153,7 +153,7 @@ func (sp *scrapePool) reload(cfg *config.ScrapeConfig) {
 	sp.mtx.Lock()
 	defer sp.mtx.Unlock()
-	client, err := NewHTTPClient(cfg)
+	client, err := NewHTTPClient(cfg.HTTPClientConfig)
 	if err != nil {
 		// Any errors that could occur here should be caught during config validation.
 		log.Errorf("Error creating HTTP client for job %q: %s", cfg.JobName, err)

View file

@@ -68,7 +68,7 @@ func NewTarget(labels, metaLabels model.LabelSet, params url.Values) *Target {
 }
 // NewHTTPClient returns a new HTTP client configured for the given scrape configuration.
-func NewHTTPClient(cfg *config.ScrapeConfig) (*http.Client, error) {
+func NewHTTPClient(cfg config.HTTPClientConfig) (*http.Client, error) {
 	tlsConfig, err := httputil.NewTLSConfig(cfg.TLSConfig)
 	if err != nil {
 		return nil, err

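What NewHTTPClient now receives is exactly the material it needs: TLS settings for the transport and credentials to wrap around it. A simplified standalone sketch of that assembly, covering only a CA file and a bearer token (hypothetical names, not the actual retrieval/httputil implementation):

package main

import (
	"crypto/tls"
	"crypto/x509"
	"net/http"
	"os"
)

// Stand-ins for the TLS and client settings carried by HTTPClientConfig.
type tlsSettings struct {
	CAFile             string
	InsecureSkipVerify bool
}

type clientSettings struct {
	BearerToken string
	TLS         tlsSettings
}

// bearerAuthRoundTripper adds an Authorization header to outgoing requests.
// (A production implementation should clone the request before mutating it.)
type bearerAuthRoundTripper struct {
	token string
	rt    http.RoundTripper
}

func (b *bearerAuthRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
	if req.Header.Get("Authorization") == "" {
		req.Header.Set("Authorization", "Bearer "+b.token)
	}
	return b.rt.RoundTrip(req)
}

// newClient builds an *http.Client from the settings: TLS options feed the
// transport, the bearer token wraps it as a RoundTripper.
func newClient(cfg clientSettings) (*http.Client, error) {
	tlsCfg := &tls.Config{InsecureSkipVerify: cfg.TLS.InsecureSkipVerify}
	if cfg.TLS.CAFile != "" {
		pem, err := os.ReadFile(cfg.TLS.CAFile)
		if err != nil {
			return nil, err
		}
		pool := x509.NewCertPool()
		pool.AppendCertsFromPEM(pem)
		tlsCfg.RootCAs = pool
	}
	var rt http.RoundTripper = &http.Transport{TLSClientConfig: tlsCfg}
	if cfg.BearerToken != "" {
		rt = &bearerAuthRoundTripper{token: cfg.BearerToken, rt: rt}
	}
	return &http.Client{Transport: rt}, nil
}

func main() {
	client, err := newClient(clientSettings{BearerToken: "1234"})
	if err != nil {
		panic(err)
	}
	_ = client // ready to be handed to a scrape loop
}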
View file

@@ -151,8 +151,7 @@ func TestNewHTTPBearerToken(t *testing.T) {
 	)
 	defer server.Close()
-	cfg := &config.ScrapeConfig{
-		ScrapeTimeout: model.Duration(1 * time.Second),
+	cfg := config.HTTPClientConfig{
 		BearerToken: "1234",
 	}
 	c, err := NewHTTPClient(cfg)
@@ -179,8 +178,7 @@ func TestNewHTTPBearerTokenFile(t *testing.T) {
 	)
 	defer server.Close()
-	cfg := &config.ScrapeConfig{
-		ScrapeTimeout: model.Duration(1 * time.Second),
+	cfg := config.HTTPClientConfig{
 		BearerTokenFile: "testdata/bearertoken.txt",
 	}
 	c, err := NewHTTPClient(cfg)
@@ -206,8 +204,7 @@ func TestNewHTTPBasicAuth(t *testing.T) {
 	)
 	defer server.Close()
-	cfg := &config.ScrapeConfig{
-		ScrapeTimeout: model.Duration(1 * time.Second),
+	cfg := config.HTTPClientConfig{
 		BasicAuth: &config.BasicAuth{
 			Username: "user",
 			Password: "password123",
@@ -236,8 +233,7 @@ func TestNewHTTPCACert(t *testing.T) {
 	server.StartTLS()
 	defer server.Close()
-	cfg := &config.ScrapeConfig{
-		ScrapeTimeout: model.Duration(1 * time.Second),
+	cfg := config.HTTPClientConfig{
 		TLSConfig: config.TLSConfig{
 			CAFile: caCertPath,
 		},
@@ -269,8 +265,7 @@ func TestNewHTTPClientCert(t *testing.T) {
 	server.StartTLS()
 	defer server.Close()
-	cfg := &config.ScrapeConfig{
-		ScrapeTimeout: model.Duration(1 * time.Second),
+	cfg := config.HTTPClientConfig{
 		TLSConfig: config.TLSConfig{
 			CAFile:   caCertPath,
 			CertFile: "testdata/client.cer",
@@ -300,8 +295,7 @@ func TestNewHTTPWithServerName(t *testing.T) {
 	server.StartTLS()
 	defer server.Close()
-	cfg := &config.ScrapeConfig{
-		ScrapeTimeout: model.Duration(1 * time.Second),
+	cfg := config.HTTPClientConfig{
 		TLSConfig: config.TLSConfig{
 			CAFile:     caCertPath,
 			ServerName: "prometheus.rocks",
@@ -330,8 +324,7 @@ func TestNewHTTPWithBadServerName(t *testing.T) {
 	server.StartTLS()
 	defer server.Close()
-	cfg := &config.ScrapeConfig{
-		ScrapeTimeout: model.Duration(1 * time.Second),
+	cfg := config.HTTPClientConfig{
 		TLSConfig: config.TLSConfig{
 			CAFile:     caCertPath,
 			ServerName: "badname",
@@ -369,8 +362,7 @@ func newTLSConfig(certName string, t *testing.T) *tls.Config {
 }
 func TestNewClientWithBadTLSConfig(t *testing.T) {
-	cfg := &config.ScrapeConfig{
-		ScrapeTimeout: model.Duration(1 * time.Second),
+	cfg := config.HTTPClientConfig{
 		TLSConfig: config.TLSConfig{
 			CAFile:   "testdata/nonexistent_ca.cer",
 			CertFile: "testdata/nonexistent_client.cer",

View file

@@ -120,7 +120,7 @@ func (tm *TargetManager) reload() {
 		} else {
 			ts.sp.reload(scfg)
 		}
-		ts.ts.UpdateProviders(discovery.ProvidersFromConfig(scfg))
+		ts.ts.UpdateProviders(discovery.ProvidersFromConfig(scfg.ServiceDiscoveryConfig))
 	}
 	// Remove old target sets. Waiting for scrape pools to complete pending