config: extract SD and HTTPClient configurations

Fabian Reinartz 2016-11-23 12:41:19 +01:00
parent dd1a656cc4
commit 200bbe1bad
9 changed files with 240 additions and 164 deletions
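In short: the HTTP client options and the per-mechanism service discovery lists move out of ScrapeConfig into two dedicated structs, ServiceDiscoveryConfig and HTTPClientConfig, which are inlined back into ScrapeConfig with `yaml:",inline"` so the on-disk configuration format does not change. A condensed sketch of the resulting shape, abridged from the diff below (field lists shortened):

type ServiceDiscoveryConfig struct {
    // One list per discovery mechanism (static, DNS, Consul, Kubernetes, Marathon, EC2, Azure, Nerve, ...).
    StaticConfigs []*TargetGroup `yaml:"static_configs,omitempty"`
    // ...
}

type HTTPClientConfig struct {
    BasicAuth       *BasicAuth `yaml:"basic_auth,omitempty"`
    BearerToken     string     `yaml:"bearer_token,omitempty"`
    BearerTokenFile string     `yaml:"bearer_token_file,omitempty"`
    ProxyURL        URL        `yaml:"proxy_url,omitempty"`
    TLSConfig       TLSConfig  `yaml:"tls_config,omitempty"`
}

type ScrapeConfig struct {
    JobName string `yaml:"job_name"`
    // ... scrape-level options (interval, timeout, metrics path, scheme, ...) ...

    // Inlined rather than embedded so the YAML keys stay flat.
    ServiceDiscoveryConfig ServiceDiscoveryConfig `yaml:",inline"`
    HTTPClientConfig       HTTPClientConfig       `yaml:",inline"`
}

Callers change accordingly: NewHTTPClient now takes a config.HTTPClientConfig and discovery.ProvidersFromConfig a config.ServiceDiscoveryConfig (see the hunks below).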

View file

@@ -102,15 +102,15 @@ func checkConfig(t cli.Term, filename string) ([]string, error) {
     }
     for _, scfg := range cfg.ScrapeConfigs {
-        if err := checkFileExists(scfg.BearerTokenFile); err != nil {
-            return nil, fmt.Errorf("error checking bearer token file %q: %s", scfg.BearerTokenFile, err)
+        if err := checkFileExists(scfg.HTTPClientConfig.BearerTokenFile); err != nil {
+            return nil, fmt.Errorf("error checking bearer token file %q: %s", scfg.HTTPClientConfig.BearerTokenFile, err)
         }
-        if err := checkTLSConfig(scfg.TLSConfig); err != nil {
+        if err := checkTLSConfig(scfg.HTTPClientConfig.TLSConfig); err != nil {
             return nil, err
         }
-        for _, kd := range scfg.KubernetesSDConfigs {
+        for _, kd := range scfg.ServiceDiscoveryConfig.KubernetesSDConfigs {
             if err := checkTLSConfig(kd.TLSConfig); err != nil {
                 return nil, err
             }

View file

@@ -87,6 +87,11 @@ var (
         HonorLabels: false,
     }
+
+    // DefaultAlertmanagersConfig is the default alertmanager configuration.
+    DefaultAlertmanagersConfig = AlertmanagersConfig{
+        Scheme: "http",
+    }
     // DefaultRelabelConfig is the default Relabel configuration.
     DefaultRelabelConfig = RelabelConfig{
         Action: RelabelReplace,
@@ -214,20 +219,22 @@ func resolveFilepaths(baseDir string, cfg *Config) {
         cfg.RuleFiles[i] = join(rf)
     }
-    for _, scfg := range cfg.ScrapeConfigs {
+    for _, cfg := range cfg.ScrapeConfigs {
+        scfg := &cfg.HTTPClientConfig
         scfg.BearerTokenFile = join(scfg.BearerTokenFile)
         scfg.TLSConfig.CAFile = join(scfg.TLSConfig.CAFile)
         scfg.TLSConfig.CertFile = join(scfg.TLSConfig.CertFile)
         scfg.TLSConfig.KeyFile = join(scfg.TLSConfig.KeyFile)
-        for _, kcfg := range scfg.KubernetesSDConfigs {
+        for _, kcfg := range cfg.ServiceDiscoveryConfig.KubernetesSDConfigs {
             kcfg.BearerTokenFile = join(kcfg.BearerTokenFile)
             kcfg.TLSConfig.CAFile = join(kcfg.TLSConfig.CAFile)
             kcfg.TLSConfig.CertFile = join(kcfg.TLSConfig.CertFile)
             kcfg.TLSConfig.KeyFile = join(kcfg.TLSConfig.KeyFile)
         }
-        for _, mcfg := range scfg.MarathonSDConfigs {
+        for _, mcfg := range cfg.ServiceDiscoveryConfig.MarathonSDConfigs {
             mcfg.TLSConfig.CAFile = join(mcfg.TLSConfig.CAFile)
             mcfg.TLSConfig.CertFile = join(mcfg.TLSConfig.CertFile)
             mcfg.TLSConfig.KeyFile = join(mcfg.TLSConfig.KeyFile)
@@ -312,11 +319,6 @@ func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error {
     return nil
 }
-// AlertingConfig configures alerting and alertmanager related configs
-type AlertingConfig struct {
-    AlertRelabelConfigs []*RelabelConfig `yaml:"alert_relabel_configs,omitempty"`
-}
 // GlobalConfig configures values that are used across other configuration
 // objects.
 type GlobalConfig struct {
@@ -404,33 +406,8 @@ func (c *TLSConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
     return nil
 }
-// ScrapeConfig configures a scraping unit for Prometheus.
-type ScrapeConfig struct {
-    // The job name to which the job label is set by default.
-    JobName string `yaml:"job_name"`
-    // Indicator whether the scraped metrics should remain unmodified.
-    HonorLabels bool `yaml:"honor_labels,omitempty"`
-    // A set of query parameters with which the target is scraped.
-    Params url.Values `yaml:"params,omitempty"`
-    // How frequently to scrape the targets of this scrape config.
-    ScrapeInterval model.Duration `yaml:"scrape_interval,omitempty"`
-    // The timeout for scraping targets of this config.
-    ScrapeTimeout model.Duration `yaml:"scrape_timeout,omitempty"`
-    // The HTTP resource path on which to fetch metrics from targets.
-    MetricsPath string `yaml:"metrics_path,omitempty"`
-    // The URL scheme with which to fetch metrics from targets.
-    Scheme string `yaml:"scheme,omitempty"`
-    // The HTTP basic authentication credentials for the targets.
-    BasicAuth *BasicAuth `yaml:"basic_auth,omitempty"`
-    // The bearer token for the targets.
-    BearerToken string `yaml:"bearer_token,omitempty"`
-    // The bearer token file for the targets.
-    BearerTokenFile string `yaml:"bearer_token_file,omitempty"`
-    // HTTP proxy server to use to connect to the targets.
-    ProxyURL URL `yaml:"proxy_url,omitempty"`
-    // TLSConfig to use to connect to the targets.
-    TLSConfig TLSConfig `yaml:"tls_config,omitempty"`
+// ServiceDiscoveryConfig configures lists of different service discovery mechanisms.
+type ServiceDiscoveryConfig struct {
     // List of labeled target groups for this job.
     StaticConfigs []*TargetGroup `yaml:"static_configs,omitempty"`
     // List of DNS service discovery configurations.
@@ -454,6 +431,62 @@ type ScrapeConfig struct {
     // List of Azure service discovery configurations.
     AzureSDConfigs []*AzureSDConfig `yaml:"azure_sd_configs,omitempty"`
+
+    // Catches all undefined fields and must be empty after parsing.
+    XXX map[string]interface{} `yaml:",inline"`
+}
+
+// UnmarshalYAML implements the yaml.Unmarshaler interface.
+func (c *ServiceDiscoveryConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
+    type plain ServiceDiscoveryConfig
+    if err := unmarshal((*plain)(c)); err != nil {
+        return err
+    }
+    if err := checkOverflow(c.XXX, "TLS config"); err != nil {
+        return err
+    }
+    return nil
+}
+
+// HTTPClientConfig configures an HTTP client.
+type HTTPClientConfig struct {
+    // The HTTP basic authentication credentials for the targets.
+    BasicAuth *BasicAuth `yaml:"basic_auth,omitempty"`
+    // The bearer token for the targets.
+    BearerToken string `yaml:"bearer_token,omitempty"`
+    // The bearer token file for the targets.
+    BearerTokenFile string `yaml:"bearer_token_file,omitempty"`
+    // HTTP proxy server to use to connect to the targets.
+    ProxyURL URL `yaml:"proxy_url,omitempty"`
+    // TLSConfig to use to connect to the targets.
+    TLSConfig TLSConfig `yaml:"tls_config,omitempty"`
+
+    // Catches all undefined fields and must be empty after parsing.
+    XXX map[string]interface{} `yaml:",inline"`
+}
+
+// ScrapeConfig configures a scraping unit for Prometheus.
+type ScrapeConfig struct {
+    // The job name to which the job label is set by default.
+    JobName string `yaml:"job_name"`
+    // Indicator whether the scraped metrics should remain unmodified.
+    HonorLabels bool `yaml:"honor_labels,omitempty"`
+    // A set of query parameters with which the target is scraped.
+    Params url.Values `yaml:"params,omitempty"`
+    // How frequently to scrape the targets of this scrape config.
+    ScrapeInterval model.Duration `yaml:"scrape_interval,omitempty"`
+    // The timeout for scraping targets of this config.
+    ScrapeTimeout model.Duration `yaml:"scrape_timeout,omitempty"`
+    // The HTTP resource path on which to fetch metrics from targets.
+    MetricsPath string `yaml:"metrics_path,omitempty"`
+    // The URL scheme with which to fetch metrics from targets.
+    Scheme string `yaml:"scheme,omitempty"`
+
+    // We cannot do proper Go type embedding below as the parser will then parse
+    // values arbitrarily into the overflow maps of further-down types.
+    ServiceDiscoveryConfig ServiceDiscoveryConfig `yaml:",inline"`
+    HTTPClientConfig       HTTPClientConfig       `yaml:",inline"`
+
     // List of target relabel configurations.
     RelabelConfigs []*RelabelConfig `yaml:"relabel_configs,omitempty"`
     // List of metric relabel configurations.
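Because both extracted structs are inlined (`yaml:",inline"`) rather than nested under new keys, existing configuration files keep parsing as before; the flat YAML keys simply land in the embedded sub-configs. A minimal sketch of that behavior, assuming the config package above and the gopkg.in/yaml.v2 parser it already uses (the function name and example values are made up for illustration):

package config

import (
    "fmt"

    "gopkg.in/yaml.v2"
)

// exampleInlineParsing is a hypothetical helper: it shows that flat scrape_config
// keys such as bearer_token_file and static_configs still unmarshal, now into the
// inlined HTTPClientConfig and ServiceDiscoveryConfig fields.
func exampleInlineParsing() error {
    raw := `
job_name: example
bearer_token_file: testdata/valid_token_file
static_configs:
- targets: ["localhost:9090"]
`
    var sc ScrapeConfig
    if err := yaml.Unmarshal([]byte(raw), &sc); err != nil {
        return err
    }
    fmt.Println(sc.HTTPClientConfig.BearerTokenFile)          // testdata/valid_token_file
    fmt.Println(len(sc.ServiceDiscoveryConfig.StaticConfigs)) // 1
    return nil
}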
@@ -477,15 +510,19 @@ func (c *ScrapeConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
     if len(c.JobName) == 0 {
         return fmt.Errorf("job_name is empty")
     }
-    if len(c.BearerToken) > 0 && len(c.BearerTokenFile) > 0 {
+    // The UnmarshalYAML method of HTTPClientConfig is not being called because it's not a pointer.
+    // We cannot make it a pointer as the parser panics for inlined pointer structs.
+    // Thus we just do its validation here.
+    if len(c.HTTPClientConfig.BearerToken) > 0 && len(c.HTTPClientConfig.BearerTokenFile) > 0 {
         return fmt.Errorf("at most one of bearer_token & bearer_token_file must be configured")
     }
-    if c.BasicAuth != nil && (len(c.BearerToken) > 0 || len(c.BearerTokenFile) > 0) {
+    if c.HTTPClientConfig.BasicAuth != nil && (len(c.HTTPClientConfig.BearerToken) > 0 || len(c.HTTPClientConfig.BearerTokenFile) > 0) {
         return fmt.Errorf("at most one of basic_auth, bearer_token & bearer_token_file must be configured")
     }
     // Check for users putting URLs in target groups.
     if len(c.RelabelConfigs) == 0 {
-        for _, tg := range c.StaticConfigs {
+        for _, tg := range c.ServiceDiscoveryConfig.StaticConfigs {
             for _, t := range tg.Targets {
                 if err = CheckTargetAddress(t[model.AddressLabel]); err != nil {
                     return err
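Because the inlined HTTPClientConfig never gets its own UnmarshalYAML call, the mutual-exclusion checks above run inside ScrapeConfig.UnmarshalYAML. A small sketch of the effect, under the same assumptions as before (hypothetical function name, illustrative values):

package config

import (
    "fmt"

    "gopkg.in/yaml.v2"
)

// exampleBearerConflict is a hypothetical helper: a scrape config that sets both
// bearer_token and bearer_token_file should be rejected during unmarshaling.
func exampleBearerConflict() {
    raw := `
job_name: example
bearer_token: "1234"
bearer_token_file: testdata/bearertoken.txt
`
    var sc ScrapeConfig
    err := yaml.Unmarshal([]byte(raw), &sc)
    fmt.Println(err) // at most one of bearer_token & bearer_token_file must be configured
}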
@@ -496,6 +533,29 @@ func (c *ScrapeConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
     return nil
 }
+
+// AlertingConfig configures alerting and alertmanager related configs
+type AlertingConfig struct {
+    AlertRelabelConfigs []*RelabelConfig `yaml:"alert_relabel_configs,omitempty"`
+
+    // Catches all undefined fields and must be empty after parsing.
+    XXX map[string]interface{} `yaml:",inline"`
+}
+
+// UnmarshalYAML implements the yaml.Unmarshaler interface.
+func (c *AlertingConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
+    // Create a clean global config as the previous one was already populated
+    // by the default due to the YAML parser behavior for empty blocks.
+    *c = AlertingConfig{}
+    type plain AlertingConfig
+    if err := unmarshal((*plain)(c)); err != nil {
+        return err
+    }
+    if err := checkOverflow(c.XXX, "alerting config"); err != nil {
+        return err
+    }
+    return nil
+}
+
 // CheckTargetAddress checks if target address is valid.
 func CheckTargetAddress(address model.LabelValue) error {
     // For now check for a URL, we may want to expand this later.

View file

@@ -68,29 +68,33 @@ var expectedConf = &Config{
     MetricsPath: DefaultScrapeConfig.MetricsPath,
     Scheme:      DefaultScrapeConfig.Scheme,
-    BearerTokenFile: "testdata/valid_token_file",
-    StaticConfigs: []*TargetGroup{
-        {
-            Targets: []model.LabelSet{
-                {model.AddressLabel: "localhost:9090"},
-                {model.AddressLabel: "localhost:9191"},
-            },
-            Labels: model.LabelSet{
-                "my":   "label",
-                "your": "label",
-            },
-        },
-    },
-    FileSDConfigs: []*FileSDConfig{
-        {
-            Files:           []string{"foo/*.slow.json", "foo/*.slow.yml", "single/file.yml"},
-            RefreshInterval: model.Duration(10 * time.Minute),
-        },
-        {
-            Files:           []string{"bar/*.yaml"},
-            RefreshInterval: model.Duration(5 * time.Minute),
-        },
-    },
+    HTTPClientConfig: HTTPClientConfig{
+        BearerTokenFile: "testdata/valid_token_file",
+    },
+    ServiceDiscoveryConfig: ServiceDiscoveryConfig{
+        StaticConfigs: []*TargetGroup{
+            {
+                Targets: []model.LabelSet{
+                    {model.AddressLabel: "localhost:9090"},
+                    {model.AddressLabel: "localhost:9191"},
+                },
+                Labels: model.LabelSet{
+                    "my":   "label",
+                    "your": "label",
+                },
+            },
+        },
+        FileSDConfigs: []*FileSDConfig{
+            {
+                Files:           []string{"foo/*.slow.json", "foo/*.slow.yml", "single/file.yml"},
+                RefreshInterval: model.Duration(10 * time.Minute),
+            },
+            {
+                Files:           []string{"bar/*.yaml"},
+                RefreshInterval: model.Duration(5 * time.Minute),
+            },
+        },
+    },
 },
@@ -130,28 +134,32 @@ var expectedConf = &Config{
     ScrapeInterval: model.Duration(50 * time.Second),
     ScrapeTimeout:  model.Duration(5 * time.Second),
-    BasicAuth: &BasicAuth{
-        Username: "admin_name",
-        Password: "admin_password",
-    },
+    HTTPClientConfig: HTTPClientConfig{
+        BasicAuth: &BasicAuth{
+            Username: "admin_name",
+            Password: "admin_password",
+        },
+    },
     MetricsPath: "/my_path",
     Scheme:      "https",
-    DNSSDConfigs: []*DNSSDConfig{
-        {
-            Names: []string{
-                "first.dns.address.domain.com",
-                "second.dns.address.domain.com",
-            },
-            RefreshInterval: model.Duration(15 * time.Second),
-            Type:            "SRV",
-        },
-        {
-            Names: []string{
-                "first.dns.address.domain.com",
-            },
-            RefreshInterval: model.Duration(30 * time.Second),
-            Type:            "SRV",
-        },
-    },
+    ServiceDiscoveryConfig: ServiceDiscoveryConfig{
+        DNSSDConfigs: []*DNSSDConfig{
+            {
+                Names: []string{
+                    "first.dns.address.domain.com",
+                    "second.dns.address.domain.com",
+                },
+                RefreshInterval: model.Duration(15 * time.Second),
+                Type:            "SRV",
+            },
+            {
+                Names: []string{
+                    "first.dns.address.domain.com",
+                },
+                RefreshInterval: model.Duration(30 * time.Second),
+                Type:            "SRV",
+            },
+        },
+    },
 },
@@ -205,12 +213,14 @@ var expectedConf = &Config{
     MetricsPath: DefaultScrapeConfig.MetricsPath,
     Scheme:      DefaultScrapeConfig.Scheme,
-    ConsulSDConfigs: []*ConsulSDConfig{
-        {
-            Server:       "localhost:1234",
-            Services:     []string{"nginx", "cache", "mysql"},
-            TagSeparator: DefaultConsulSDConfig.TagSeparator,
-            Scheme:       DefaultConsulSDConfig.Scheme,
-        },
-    },
+    ServiceDiscoveryConfig: ServiceDiscoveryConfig{
+        ConsulSDConfigs: []*ConsulSDConfig{
+            {
+                Server:       "localhost:1234",
+                Services:     []string{"nginx", "cache", "mysql"},
+                TagSeparator: DefaultConsulSDConfig.TagSeparator,
+                Scheme:       DefaultConsulSDConfig.Scheme,
+            },
+        },
+    },
 },
@@ -234,12 +244,14 @@ var expectedConf = &Config{
     MetricsPath: "/metrics",
     Scheme:      "http",
-    TLSConfig: TLSConfig{
-        CertFile: "testdata/valid_cert_file",
-        KeyFile:  "testdata/valid_key_file",
-    },
-    BearerToken: "avalidtoken",
+    HTTPClientConfig: HTTPClientConfig{
+        TLSConfig: TLSConfig{
+            CertFile: "testdata/valid_cert_file",
+            KeyFile:  "testdata/valid_key_file",
+        },
+        BearerToken: "avalidtoken",
+    },
 },
 {
     JobName: "service-kubernetes",
@@ -250,13 +262,15 @@ var expectedConf = &Config{
     MetricsPath: DefaultScrapeConfig.MetricsPath,
     Scheme:      DefaultScrapeConfig.Scheme,
-    KubernetesSDConfigs: []*KubernetesSDConfig{
-        {
-            APIServer: kubernetesSDHostURL(),
-            Role:      KubernetesRoleEndpoint,
-            BasicAuth: &BasicAuth{
-                Username: "myusername",
-                Password: "mypassword",
-            },
-        },
-    },
+    ServiceDiscoveryConfig: ServiceDiscoveryConfig{
+        KubernetesSDConfigs: []*KubernetesSDConfig{
+            {
+                APIServer: kubernetesSDHostURL(),
+                Role:      KubernetesRoleEndpoint,
+                BasicAuth: &BasicAuth{
+                    Username: "myusername",
+                    Password: "mypassword",
+                },
+            },
+        },
+    },
 },
@@ -270,16 +284,18 @@ var expectedConf = &Config{
     MetricsPath: DefaultScrapeConfig.MetricsPath,
     Scheme:      DefaultScrapeConfig.Scheme,
-    MarathonSDConfigs: []*MarathonSDConfig{
-        {
-            Servers: []string{
-                "https://marathon.example.com:443",
-            },
-            Timeout:         model.Duration(30 * time.Second),
-            RefreshInterval: model.Duration(30 * time.Second),
-            TLSConfig: TLSConfig{
-                CertFile: "testdata/valid_cert_file",
-                KeyFile:  "testdata/valid_key_file",
-            },
-        },
-    },
+    ServiceDiscoveryConfig: ServiceDiscoveryConfig{
+        MarathonSDConfigs: []*MarathonSDConfig{
+            {
+                Servers: []string{
+                    "https://marathon.example.com:443",
+                },
+                Timeout:         model.Duration(30 * time.Second),
+                RefreshInterval: model.Duration(30 * time.Second),
+                TLSConfig: TLSConfig{
+                    CertFile: "testdata/valid_cert_file",
+                    KeyFile:  "testdata/valid_key_file",
+                },
+            },
+        },
+    },
 },
@@ -293,14 +309,16 @@ var expectedConf = &Config{
     MetricsPath: DefaultScrapeConfig.MetricsPath,
     Scheme:      DefaultScrapeConfig.Scheme,
-    EC2SDConfigs: []*EC2SDConfig{
-        {
-            Region:          "us-east-1",
-            AccessKey:       "access",
-            SecretKey:       "secret",
-            Profile:         "profile",
-            RefreshInterval: model.Duration(60 * time.Second),
-            Port:            80,
-        },
-    },
+    ServiceDiscoveryConfig: ServiceDiscoveryConfig{
+        EC2SDConfigs: []*EC2SDConfig{
+            {
+                Region:          "us-east-1",
+                AccessKey:       "access",
+                SecretKey:       "secret",
+                Profile:         "profile",
+                RefreshInterval: model.Duration(60 * time.Second),
+                Port:            80,
+            },
+        },
+    },
 },
@@ -313,14 +331,16 @@ var expectedConf = &Config{
     MetricsPath: DefaultScrapeConfig.MetricsPath,
     Scheme:      DefaultScrapeConfig.Scheme,
-    AzureSDConfigs: []*AzureSDConfig{
-        {
-            SubscriptionID:  "11AAAA11-A11A-111A-A111-1111A1111A11",
-            TenantID:        "BBBB222B-B2B2-2B22-B222-2BB2222BB2B2",
-            ClientID:        "333333CC-3C33-3333-CCC3-33C3CCCCC33C",
-            ClientSecret:    "nAdvAK2oBuVym4IXix",
-            RefreshInterval: model.Duration(5 * time.Minute),
-            Port:            9100,
-        },
-    },
+    ServiceDiscoveryConfig: ServiceDiscoveryConfig{
+        AzureSDConfigs: []*AzureSDConfig{
+            {
+                SubscriptionID:  "11AAAA11-A11A-111A-A111-1111A1111A11",
+                TenantID:        "BBBB222B-B2B2-2B22-B222-2BB2222BB2B2",
+                ClientID:        "333333CC-3C33-3333-CCC3-33C3CCCCC33C",
+                ClientSecret:    "nAdvAK2oBuVym4IXix",
+                RefreshInterval: model.Duration(5 * time.Minute),
+                Port:            9100,
+            },
+        },
+    },
 },
@@ -333,11 +353,13 @@ var expectedConf = &Config{
     MetricsPath: DefaultScrapeConfig.MetricsPath,
     Scheme:      DefaultScrapeConfig.Scheme,
-    NerveSDConfigs: []*NerveSDConfig{
-        {
-            Servers: []string{"localhost"},
-            Paths:   []string{"/monitoring"},
-            Timeout: model.Duration(10 * time.Second),
-        },
-    },
+    ServiceDiscoveryConfig: ServiceDiscoveryConfig{
+        NerveSDConfigs: []*NerveSDConfig{
+            {
+                Servers: []string{"localhost"},
+                Paths:   []string{"/monitoring"},
+                Timeout: model.Duration(10 * time.Second),
+            },
+        },
+    },
 },
@@ -350,10 +372,12 @@ var expectedConf = &Config{
     MetricsPath: DefaultScrapeConfig.MetricsPath,
     Scheme:      DefaultScrapeConfig.Scheme,
-    StaticConfigs: []*TargetGroup{
-        {
-            Targets: []model.LabelSet{
-                {model.AddressLabel: "localhost:9090"},
-            },
-        },
-    },
+    ServiceDiscoveryConfig: ServiceDiscoveryConfig{
+        StaticConfigs: []*TargetGroup{
+            {
+                Targets: []model.LabelSet{
+                    {model.AddressLabel: "localhost:9090"},
+                },
+            },
+        },
+    },
 },
@@ -367,10 +391,12 @@ var expectedConf = &Config{
     MetricsPath: DefaultScrapeConfig.MetricsPath,
     Scheme:      DefaultScrapeConfig.Scheme,
-    StaticConfigs: []*TargetGroup{
-        {
-            Targets: []model.LabelSet{
-                {model.AddressLabel: "localhost:9090"},
-            },
-        },
-    },
+    ServiceDiscoveryConfig: ServiceDiscoveryConfig{
+        StaticConfigs: []*TargetGroup{
+            {
+                Targets: []model.LabelSet{
+                    {model.AddressLabel: "localhost:9090"},
+                },
+            },
+        },
+    },
 },

View file

@@ -49,7 +49,7 @@ type TargetProvider interface {
 }
 // ProvidersFromConfig returns all TargetProviders configured in cfg.
-func ProvidersFromConfig(cfg *config.ScrapeConfig) map[string]TargetProvider {
+func ProvidersFromConfig(cfg config.ServiceDiscoveryConfig) map[string]TargetProvider {
     providers := map[string]TargetProvider{}
     app := func(mech string, i int, tp TargetProvider) {

View file

@@ -33,15 +33,14 @@ func TestTargetSetRecreatesTargetGroupsEveryRun(t *testing.T) {
         }
     }
-    scrapeConfig := &config.ScrapeConfig{}
+    cfg := &config.ServiceDiscoveryConfig{}
     sOne := `
-job_name: "foo"
 static_configs:
 - targets: ["foo:9090"]
 - targets: ["bar:9090"]
 `
-    if err := yaml.Unmarshal([]byte(sOne), scrapeConfig); err != nil {
+    if err := yaml.Unmarshal([]byte(sOne), cfg); err != nil {
         t.Fatalf("Unable to load YAML config sOne: %s", err)
     }
     called := make(chan struct{})
@@ -54,22 +53,21 @@ static_configs:
     go ts.Run(ctx)
-    ts.UpdateProviders(ProvidersFromConfig(scrapeConfig))
+    ts.UpdateProviders(ProvidersFromConfig(*cfg))
     <-called
     verifyPresence(ts.tgroups, "static/0/0", true)
     verifyPresence(ts.tgroups, "static/0/1", true)
     sTwo := `
-job_name: "foo"
 static_configs:
 - targets: ["foo:9090"]
 `
-    if err := yaml.Unmarshal([]byte(sTwo), scrapeConfig); err != nil {
+    if err := yaml.Unmarshal([]byte(sTwo), cfg); err != nil {
         t.Fatalf("Unable to load YAML config sTwo: %s", err)
     }
-    ts.UpdateProviders(ProvidersFromConfig(scrapeConfig))
+    ts.UpdateProviders(ProvidersFromConfig(*cfg))
     <-called
     verifyPresence(ts.tgroups, "static/0/0", true)

View file

@@ -106,7 +106,7 @@ type scrapePool struct {
 }
 func newScrapePool(ctx context.Context, cfg *config.ScrapeConfig, app storage.SampleAppender) *scrapePool {
-    client, err := NewHTTPClient(cfg)
+    client, err := NewHTTPClient(cfg.HTTPClientConfig)
     if err != nil {
         // Any errors that could occur here should be caught during config validation.
         log.Errorf("Error creating HTTP client for job %q: %s", cfg.JobName, err)
@@ -153,7 +153,7 @@ func (sp *scrapePool) reload(cfg *config.ScrapeConfig) {
     sp.mtx.Lock()
     defer sp.mtx.Unlock()
-    client, err := NewHTTPClient(cfg)
+    client, err := NewHTTPClient(cfg.HTTPClientConfig)
     if err != nil {
         // Any errors that could occur here should be caught during config validation.
         log.Errorf("Error creating HTTP client for job %q: %s", cfg.JobName, err)

View file

@@ -68,7 +68,7 @@ func NewTarget(labels, metaLabels model.LabelSet, params url.Values) *Target {
 }
 // NewHTTPClient returns a new HTTP client configured for the given scrape configuration.
-func NewHTTPClient(cfg *config.ScrapeConfig) (*http.Client, error) {
+func NewHTTPClient(cfg config.HTTPClientConfig) (*http.Client, error) {
     tlsConfig, err := httputil.NewTLSConfig(cfg.TLSConfig)
     if err != nil {
         return nil, err

View file

@@ -151,9 +151,8 @@ func TestNewHTTPBearerToken(t *testing.T) {
     )
     defer server.Close()
-    cfg := &config.ScrapeConfig{
-        ScrapeTimeout: model.Duration(1 * time.Second),
-        BearerToken:   "1234",
+    cfg := config.HTTPClientConfig{
+        BearerToken: "1234",
     }
     c, err := NewHTTPClient(cfg)
     if err != nil {
@@ -179,8 +178,7 @@ func TestNewHTTPBearerTokenFile(t *testing.T) {
     )
     defer server.Close()
-    cfg := &config.ScrapeConfig{
-        ScrapeTimeout: model.Duration(1 * time.Second),
+    cfg := config.HTTPClientConfig{
         BearerTokenFile: "testdata/bearertoken.txt",
     }
     c, err := NewHTTPClient(cfg)
@@ -206,8 +204,7 @@ func TestNewHTTPBasicAuth(t *testing.T) {
     )
     defer server.Close()
-    cfg := &config.ScrapeConfig{
-        ScrapeTimeout: model.Duration(1 * time.Second),
+    cfg := config.HTTPClientConfig{
         BasicAuth: &config.BasicAuth{
             Username: "user",
             Password: "password123",
@@ -236,8 +233,7 @@ func TestNewHTTPCACert(t *testing.T) {
     server.StartTLS()
     defer server.Close()
-    cfg := &config.ScrapeConfig{
-        ScrapeTimeout: model.Duration(1 * time.Second),
+    cfg := config.HTTPClientConfig{
         TLSConfig: config.TLSConfig{
             CAFile: caCertPath,
         },
@@ -269,8 +265,7 @@ func TestNewHTTPClientCert(t *testing.T) {
     server.StartTLS()
     defer server.Close()
-    cfg := &config.ScrapeConfig{
-        ScrapeTimeout: model.Duration(1 * time.Second),
+    cfg := config.HTTPClientConfig{
         TLSConfig: config.TLSConfig{
             CAFile:   caCertPath,
             CertFile: "testdata/client.cer",
@@ -300,8 +295,7 @@ func TestNewHTTPWithServerName(t *testing.T) {
     server.StartTLS()
     defer server.Close()
-    cfg := &config.ScrapeConfig{
-        ScrapeTimeout: model.Duration(1 * time.Second),
+    cfg := config.HTTPClientConfig{
         TLSConfig: config.TLSConfig{
             CAFile:     caCertPath,
             ServerName: "prometheus.rocks",
@@ -330,8 +324,7 @@ func TestNewHTTPWithBadServerName(t *testing.T) {
     server.StartTLS()
     defer server.Close()
-    cfg := &config.ScrapeConfig{
-        ScrapeTimeout: model.Duration(1 * time.Second),
+    cfg := config.HTTPClientConfig{
         TLSConfig: config.TLSConfig{
             CAFile:     caCertPath,
             ServerName: "badname",
@@ -369,8 +362,7 @@ func newTLSConfig(certName string, t *testing.T) *tls.Config {
 }
 func TestNewClientWithBadTLSConfig(t *testing.T) {
-    cfg := &config.ScrapeConfig{
-        ScrapeTimeout: model.Duration(1 * time.Second),
+    cfg := config.HTTPClientConfig{
         TLSConfig: config.TLSConfig{
             CAFile:   "testdata/nonexistent_ca.cer",
             CertFile: "testdata/nonexistent_client.cer",

View file

@@ -120,7 +120,7 @@ func (tm *TargetManager) reload() {
     } else {
         ts.sp.reload(scfg)
     }
-    ts.ts.UpdateProviders(discovery.ProvidersFromConfig(scfg))
+    ts.ts.UpdateProviders(discovery.ProvidersFromConfig(scfg.ServiceDiscoveryConfig))
 }
 // Remove old target sets. Waiting for scrape pools to complete pending