config: extract SD and HTTPClient configurations

This commit is contained in:
Fabian Reinartz 2016-11-23 12:41:19 +01:00
parent dd1a656cc4
commit 200bbe1bad
9 changed files with 240 additions and 164 deletions

View file

@ -102,15 +102,15 @@ func checkConfig(t cli.Term, filename string) ([]string, error) {
}
for _, scfg := range cfg.ScrapeConfigs {
if err := checkFileExists(scfg.BearerTokenFile); err != nil {
return nil, fmt.Errorf("error checking bearer token file %q: %s", scfg.BearerTokenFile, err)
if err := checkFileExists(scfg.HTTPClientConfig.BearerTokenFile); err != nil {
return nil, fmt.Errorf("error checking bearer token file %q: %s", scfg.HTTPClientConfig.BearerTokenFile, err)
}
if err := checkTLSConfig(scfg.TLSConfig); err != nil {
if err := checkTLSConfig(scfg.HTTPClientConfig.TLSConfig); err != nil {
return nil, err
}
for _, kd := range scfg.KubernetesSDConfigs {
for _, kd := range scfg.ServiceDiscoveryConfig.KubernetesSDConfigs {
if err := checkTLSConfig(kd.TLSConfig); err != nil {
return nil, err
}

View file

@ -87,6 +87,11 @@ var (
HonorLabels: false,
}
// DefaultAlertmanagersConfig is the default alertmanager configuration.
DefaultAlertmanagersConfig = AlertmanagersConfig{
Scheme: "http",
}
// DefaultRelabelConfig is the default Relabel configuration.
DefaultRelabelConfig = RelabelConfig{
Action: RelabelReplace,
@ -214,20 +219,22 @@ func resolveFilepaths(baseDir string, cfg *Config) {
cfg.RuleFiles[i] = join(rf)
}
for _, scfg := range cfg.ScrapeConfigs {
for _, cfg := range cfg.ScrapeConfigs {
scfg := &cfg.HTTPClientConfig
scfg.BearerTokenFile = join(scfg.BearerTokenFile)
scfg.TLSConfig.CAFile = join(scfg.TLSConfig.CAFile)
scfg.TLSConfig.CertFile = join(scfg.TLSConfig.CertFile)
scfg.TLSConfig.KeyFile = join(scfg.TLSConfig.KeyFile)
for _, kcfg := range scfg.KubernetesSDConfigs {
for _, kcfg := range cfg.ServiceDiscoveryConfig.KubernetesSDConfigs {
kcfg.BearerTokenFile = join(kcfg.BearerTokenFile)
kcfg.TLSConfig.CAFile = join(kcfg.TLSConfig.CAFile)
kcfg.TLSConfig.CertFile = join(kcfg.TLSConfig.CertFile)
kcfg.TLSConfig.KeyFile = join(kcfg.TLSConfig.KeyFile)
}
for _, mcfg := range scfg.MarathonSDConfigs {
for _, mcfg := range cfg.ServiceDiscoveryConfig.MarathonSDConfigs {
mcfg.TLSConfig.CAFile = join(mcfg.TLSConfig.CAFile)
mcfg.TLSConfig.CertFile = join(mcfg.TLSConfig.CertFile)
mcfg.TLSConfig.KeyFile = join(mcfg.TLSConfig.KeyFile)
@ -312,11 +319,6 @@ func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error {
return nil
}
// AlertingConfig configures alerting and alertmanager related configs
type AlertingConfig struct {
AlertRelabelConfigs []*RelabelConfig `yaml:"alert_relabel_configs,omitempty"`
}
// GlobalConfig configures values that are used across other configuration
// objects.
type GlobalConfig struct {
@ -404,33 +406,8 @@ func (c *TLSConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
return nil
}
// ScrapeConfig configures a scraping unit for Prometheus.
type ScrapeConfig struct {
// The job name to which the job label is set by default.
JobName string `yaml:"job_name"`
// Indicator whether the scraped metrics should remain unmodified.
HonorLabels bool `yaml:"honor_labels,omitempty"`
// A set of query parameters with which the target is scraped.
Params url.Values `yaml:"params,omitempty"`
// How frequently to scrape the targets of this scrape config.
ScrapeInterval model.Duration `yaml:"scrape_interval,omitempty"`
// The timeout for scraping targets of this config.
ScrapeTimeout model.Duration `yaml:"scrape_timeout,omitempty"`
// The HTTP resource path on which to fetch metrics from targets.
MetricsPath string `yaml:"metrics_path,omitempty"`
// The URL scheme with which to fetch metrics from targets.
Scheme string `yaml:"scheme,omitempty"`
// The HTTP basic authentication credentials for the targets.
BasicAuth *BasicAuth `yaml:"basic_auth,omitempty"`
// The bearer token for the targets.
BearerToken string `yaml:"bearer_token,omitempty"`
// The bearer token file for the targets.
BearerTokenFile string `yaml:"bearer_token_file,omitempty"`
// HTTP proxy server to use to connect to the targets.
ProxyURL URL `yaml:"proxy_url,omitempty"`
// TLSConfig to use to connect to the targets.
TLSConfig TLSConfig `yaml:"tls_config,omitempty"`
// ServiceDiscoveryConfig configures lists of different service discovery mechanisms.
type ServiceDiscoveryConfig struct {
// List of labeled target groups for this job.
StaticConfigs []*TargetGroup `yaml:"static_configs,omitempty"`
// List of DNS service discovery configurations.
@ -454,6 +431,62 @@ type ScrapeConfig struct {
// List of Azure service discovery configurations.
AzureSDConfigs []*AzureSDConfig `yaml:"azure_sd_configs,omitempty"`
// Catches all undefined fields and must be empty after parsing.
XXX map[string]interface{} `yaml:",inline"`
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
func (c *ServiceDiscoveryConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
	// Unmarshal via a plain alias type to avoid recursing into this method.
	type plain ServiceDiscoveryConfig
	if err := unmarshal((*plain)(c)); err != nil {
		return err
	}
	// Reject any fields collected in the inline overflow map. The context
	// string must name this config section ("service discovery config"),
	// not "TLS config", so users get an accurate error for unknown fields.
	if err := checkOverflow(c.XXX, "service discovery config"); err != nil {
		return err
	}
	return nil
}
// HTTPClientConfig configures the HTTP client used to contact scrape targets.
// NOTE(review): when inlined into ScrapeConfig this struct's own UnmarshalYAML
// is not invoked (it is not a pointer there), so cross-field validation of
// these options happens in ScrapeConfig.UnmarshalYAML instead.
type HTTPClientConfig struct {
// The HTTP basic authentication credentials for the targets.
BasicAuth *BasicAuth `yaml:"basic_auth,omitempty"`
// The bearer token for the targets.
BearerToken string `yaml:"bearer_token,omitempty"`
// The bearer token file for the targets.
BearerTokenFile string `yaml:"bearer_token_file,omitempty"`
// HTTP proxy server to use to connect to the targets.
ProxyURL URL `yaml:"proxy_url,omitempty"`
// TLSConfig to use to connect to the targets.
TLSConfig TLSConfig `yaml:"tls_config,omitempty"`
// Catches all undefined fields and must be empty after parsing.
XXX map[string]interface{} `yaml:",inline"`
}
// ScrapeConfig configures a scraping unit for Prometheus.
type ScrapeConfig struct {
// The job name to which the job label is set by default.
JobName string `yaml:"job_name"`
// Indicator whether the scraped metrics should remain unmodified.
HonorLabels bool `yaml:"honor_labels,omitempty"`
// A set of query parameters with which the target is scraped.
Params url.Values `yaml:"params,omitempty"`
// How frequently to scrape the targets of this scrape config.
ScrapeInterval model.Duration `yaml:"scrape_interval,omitempty"`
// The timeout for scraping targets of this config.
ScrapeTimeout model.Duration `yaml:"scrape_timeout,omitempty"`
// The HTTP resource path on which to fetch metrics from targets.
MetricsPath string `yaml:"metrics_path,omitempty"`
// The URL scheme with which to fetch metrics from targets.
Scheme string `yaml:"scheme,omitempty"`
// We cannot do proper Go type embedding below as the parser will then parse
// values arbitrarily into the overflow maps of further-down types.
ServiceDiscoveryConfig ServiceDiscoveryConfig `yaml:",inline"`
HTTPClientConfig HTTPClientConfig `yaml:",inline"`
// List of target relabel configurations.
RelabelConfigs []*RelabelConfig `yaml:"relabel_configs,omitempty"`
// List of metric relabel configurations.
@ -477,15 +510,19 @@ func (c *ScrapeConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
if len(c.JobName) == 0 {
return fmt.Errorf("job_name is empty")
}
if len(c.BearerToken) > 0 && len(c.BearerTokenFile) > 0 {
// The UnmarshalYAML method of HTTPClientConfig is not being called because it's not a pointer.
// We cannot make it a pointer as the parser panics for inlined pointer structs.
// Thus we just do its validation here.
if len(c.HTTPClientConfig.BearerToken) > 0 && len(c.HTTPClientConfig.BearerTokenFile) > 0 {
return fmt.Errorf("at most one of bearer_token & bearer_token_file must be configured")
}
if c.BasicAuth != nil && (len(c.BearerToken) > 0 || len(c.BearerTokenFile) > 0) {
if c.HTTPClientConfig.BasicAuth != nil && (len(c.HTTPClientConfig.BearerToken) > 0 || len(c.HTTPClientConfig.BearerTokenFile) > 0) {
return fmt.Errorf("at most one of basic_auth, bearer_token & bearer_token_file must be configured")
}
// Check for users putting URLs in target groups.
if len(c.RelabelConfigs) == 0 {
for _, tg := range c.StaticConfigs {
for _, tg := range c.ServiceDiscoveryConfig.StaticConfigs {
for _, t := range tg.Targets {
if err = CheckTargetAddress(t[model.AddressLabel]); err != nil {
return err
@ -496,6 +533,29 @@ func (c *ScrapeConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
return nil
}
// AlertingConfig configures alerting and alertmanager related configs.
type AlertingConfig struct {
// List of alert relabel configurations.
AlertRelabelConfigs []*RelabelConfig `yaml:"alert_relabel_configs,omitempty"`
// Catches all undefined fields and must be empty after parsing.
XXX map[string]interface{} `yaml:",inline"`
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
func (c *AlertingConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
// Create a clean alerting config as the previous one was already populated
// by the default due to the YAML parser behavior for empty blocks.
*c = AlertingConfig{}
// Unmarshal via a plain alias type to avoid recursing into this method.
type plain AlertingConfig
if err := unmarshal((*plain)(c)); err != nil {
return err
}
// Reject any unknown fields collected in the inline overflow map.
if err := checkOverflow(c.XXX, "alerting config"); err != nil {
return err
}
return nil
}
// CheckTargetAddress checks if target address is valid.
func CheckTargetAddress(address model.LabelValue) error {
// For now check for a URL, we may want to expand this later.

View file

@ -68,29 +68,33 @@ var expectedConf = &Config{
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
BearerTokenFile: "testdata/valid_token_file",
StaticConfigs: []*TargetGroup{
{
Targets: []model.LabelSet{
{model.AddressLabel: "localhost:9090"},
{model.AddressLabel: "localhost:9191"},
},
Labels: model.LabelSet{
"my": "label",
"your": "label",
},
},
HTTPClientConfig: HTTPClientConfig{
BearerTokenFile: "testdata/valid_token_file",
},
FileSDConfigs: []*FileSDConfig{
{
Files: []string{"foo/*.slow.json", "foo/*.slow.yml", "single/file.yml"},
RefreshInterval: model.Duration(10 * time.Minute),
ServiceDiscoveryConfig: ServiceDiscoveryConfig{
StaticConfigs: []*TargetGroup{
{
Targets: []model.LabelSet{
{model.AddressLabel: "localhost:9090"},
{model.AddressLabel: "localhost:9191"},
},
Labels: model.LabelSet{
"my": "label",
"your": "label",
},
},
},
{
Files: []string{"bar/*.yaml"},
RefreshInterval: model.Duration(5 * time.Minute),
FileSDConfigs: []*FileSDConfig{
{
Files: []string{"foo/*.slow.json", "foo/*.slow.yml", "single/file.yml"},
RefreshInterval: model.Duration(10 * time.Minute),
},
{
Files: []string{"bar/*.yaml"},
RefreshInterval: model.Duration(5 * time.Minute),
},
},
},
@ -130,28 +134,32 @@ var expectedConf = &Config{
ScrapeInterval: model.Duration(50 * time.Second),
ScrapeTimeout: model.Duration(5 * time.Second),
BasicAuth: &BasicAuth{
Username: "admin_name",
Password: "admin_password",
HTTPClientConfig: HTTPClientConfig{
BasicAuth: &BasicAuth{
Username: "admin_name",
Password: "admin_password",
},
},
MetricsPath: "/my_path",
Scheme: "https",
DNSSDConfigs: []*DNSSDConfig{
{
Names: []string{
"first.dns.address.domain.com",
"second.dns.address.domain.com",
ServiceDiscoveryConfig: ServiceDiscoveryConfig{
DNSSDConfigs: []*DNSSDConfig{
{
Names: []string{
"first.dns.address.domain.com",
"second.dns.address.domain.com",
},
RefreshInterval: model.Duration(15 * time.Second),
Type: "SRV",
},
RefreshInterval: model.Duration(15 * time.Second),
Type: "SRV",
},
{
Names: []string{
"first.dns.address.domain.com",
{
Names: []string{
"first.dns.address.domain.com",
},
RefreshInterval: model.Duration(30 * time.Second),
Type: "SRV",
},
RefreshInterval: model.Duration(30 * time.Second),
Type: "SRV",
},
},
@ -205,12 +213,14 @@ var expectedConf = &Config{
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
ConsulSDConfigs: []*ConsulSDConfig{
{
Server: "localhost:1234",
Services: []string{"nginx", "cache", "mysql"},
TagSeparator: DefaultConsulSDConfig.TagSeparator,
Scheme: DefaultConsulSDConfig.Scheme,
ServiceDiscoveryConfig: ServiceDiscoveryConfig{
ConsulSDConfigs: []*ConsulSDConfig{
{
Server: "localhost:1234",
Services: []string{"nginx", "cache", "mysql"},
TagSeparator: DefaultConsulSDConfig.TagSeparator,
Scheme: DefaultConsulSDConfig.Scheme,
},
},
},
@ -234,12 +244,14 @@ var expectedConf = &Config{
MetricsPath: "/metrics",
Scheme: "http",
TLSConfig: TLSConfig{
CertFile: "testdata/valid_cert_file",
KeyFile: "testdata/valid_key_file",
},
HTTPClientConfig: HTTPClientConfig{
TLSConfig: TLSConfig{
CertFile: "testdata/valid_cert_file",
KeyFile: "testdata/valid_key_file",
},
BearerToken: "avalidtoken",
BearerToken: "avalidtoken",
},
},
{
JobName: "service-kubernetes",
@ -250,13 +262,15 @@ var expectedConf = &Config{
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
KubernetesSDConfigs: []*KubernetesSDConfig{
{
APIServer: kubernetesSDHostURL(),
Role: KubernetesRoleEndpoint,
BasicAuth: &BasicAuth{
Username: "myusername",
Password: "mypassword",
ServiceDiscoveryConfig: ServiceDiscoveryConfig{
KubernetesSDConfigs: []*KubernetesSDConfig{
{
APIServer: kubernetesSDHostURL(),
Role: KubernetesRoleEndpoint,
BasicAuth: &BasicAuth{
Username: "myusername",
Password: "mypassword",
},
},
},
},
@ -270,16 +284,18 @@ var expectedConf = &Config{
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
MarathonSDConfigs: []*MarathonSDConfig{
{
Servers: []string{
"https://marathon.example.com:443",
},
Timeout: model.Duration(30 * time.Second),
RefreshInterval: model.Duration(30 * time.Second),
TLSConfig: TLSConfig{
CertFile: "testdata/valid_cert_file",
KeyFile: "testdata/valid_key_file",
ServiceDiscoveryConfig: ServiceDiscoveryConfig{
MarathonSDConfigs: []*MarathonSDConfig{
{
Servers: []string{
"https://marathon.example.com:443",
},
Timeout: model.Duration(30 * time.Second),
RefreshInterval: model.Duration(30 * time.Second),
TLSConfig: TLSConfig{
CertFile: "testdata/valid_cert_file",
KeyFile: "testdata/valid_key_file",
},
},
},
},
@ -293,14 +309,16 @@ var expectedConf = &Config{
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
EC2SDConfigs: []*EC2SDConfig{
{
Region: "us-east-1",
AccessKey: "access",
SecretKey: "secret",
Profile: "profile",
RefreshInterval: model.Duration(60 * time.Second),
Port: 80,
ServiceDiscoveryConfig: ServiceDiscoveryConfig{
EC2SDConfigs: []*EC2SDConfig{
{
Region: "us-east-1",
AccessKey: "access",
SecretKey: "secret",
Profile: "profile",
RefreshInterval: model.Duration(60 * time.Second),
Port: 80,
},
},
},
},
@ -313,14 +331,16 @@ var expectedConf = &Config{
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
AzureSDConfigs: []*AzureSDConfig{
{
SubscriptionID: "11AAAA11-A11A-111A-A111-1111A1111A11",
TenantID: "BBBB222B-B2B2-2B22-B222-2BB2222BB2B2",
ClientID: "333333CC-3C33-3333-CCC3-33C3CCCCC33C",
ClientSecret: "nAdvAK2oBuVym4IXix",
RefreshInterval: model.Duration(5 * time.Minute),
Port: 9100,
ServiceDiscoveryConfig: ServiceDiscoveryConfig{
AzureSDConfigs: []*AzureSDConfig{
{
SubscriptionID: "11AAAA11-A11A-111A-A111-1111A1111A11",
TenantID: "BBBB222B-B2B2-2B22-B222-2BB2222BB2B2",
ClientID: "333333CC-3C33-3333-CCC3-33C3CCCCC33C",
ClientSecret: "nAdvAK2oBuVym4IXix",
RefreshInterval: model.Duration(5 * time.Minute),
Port: 9100,
},
},
},
},
@ -333,11 +353,13 @@ var expectedConf = &Config{
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
NerveSDConfigs: []*NerveSDConfig{
{
Servers: []string{"localhost"},
Paths: []string{"/monitoring"},
Timeout: model.Duration(10 * time.Second),
ServiceDiscoveryConfig: ServiceDiscoveryConfig{
NerveSDConfigs: []*NerveSDConfig{
{
Servers: []string{"localhost"},
Paths: []string{"/monitoring"},
Timeout: model.Duration(10 * time.Second),
},
},
},
},
@ -350,10 +372,12 @@ var expectedConf = &Config{
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
StaticConfigs: []*TargetGroup{
{
Targets: []model.LabelSet{
{model.AddressLabel: "localhost:9090"},
ServiceDiscoveryConfig: ServiceDiscoveryConfig{
StaticConfigs: []*TargetGroup{
{
Targets: []model.LabelSet{
{model.AddressLabel: "localhost:9090"},
},
},
},
},
@ -367,10 +391,12 @@ var expectedConf = &Config{
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
StaticConfigs: []*TargetGroup{
{
Targets: []model.LabelSet{
{model.AddressLabel: "localhost:9090"},
ServiceDiscoveryConfig: ServiceDiscoveryConfig{
StaticConfigs: []*TargetGroup{
{
Targets: []model.LabelSet{
{model.AddressLabel: "localhost:9090"},
},
},
},
},

View file

@ -49,7 +49,7 @@ type TargetProvider interface {
}
// ProvidersFromConfig returns all TargetProviders configured in cfg.
func ProvidersFromConfig(cfg *config.ScrapeConfig) map[string]TargetProvider {
func ProvidersFromConfig(cfg config.ServiceDiscoveryConfig) map[string]TargetProvider {
providers := map[string]TargetProvider{}
app := func(mech string, i int, tp TargetProvider) {

View file

@ -33,15 +33,14 @@ func TestTargetSetRecreatesTargetGroupsEveryRun(t *testing.T) {
}
}
scrapeConfig := &config.ScrapeConfig{}
cfg := &config.ServiceDiscoveryConfig{}
sOne := `
job_name: "foo"
static_configs:
- targets: ["foo:9090"]
- targets: ["bar:9090"]
`
if err := yaml.Unmarshal([]byte(sOne), scrapeConfig); err != nil {
if err := yaml.Unmarshal([]byte(sOne), cfg); err != nil {
t.Fatalf("Unable to load YAML config sOne: %s", err)
}
called := make(chan struct{})
@ -54,22 +53,21 @@ static_configs:
go ts.Run(ctx)
ts.UpdateProviders(ProvidersFromConfig(scrapeConfig))
ts.UpdateProviders(ProvidersFromConfig(*cfg))
<-called
verifyPresence(ts.tgroups, "static/0/0", true)
verifyPresence(ts.tgroups, "static/0/1", true)
sTwo := `
job_name: "foo"
static_configs:
- targets: ["foo:9090"]
`
if err := yaml.Unmarshal([]byte(sTwo), scrapeConfig); err != nil {
if err := yaml.Unmarshal([]byte(sTwo), cfg); err != nil {
t.Fatalf("Unable to load YAML config sTwo: %s", err)
}
ts.UpdateProviders(ProvidersFromConfig(scrapeConfig))
ts.UpdateProviders(ProvidersFromConfig(*cfg))
<-called
verifyPresence(ts.tgroups, "static/0/0", true)

View file

@ -106,7 +106,7 @@ type scrapePool struct {
}
func newScrapePool(ctx context.Context, cfg *config.ScrapeConfig, app storage.SampleAppender) *scrapePool {
client, err := NewHTTPClient(cfg)
client, err := NewHTTPClient(cfg.HTTPClientConfig)
if err != nil {
// Any errors that could occur here should be caught during config validation.
log.Errorf("Error creating HTTP client for job %q: %s", cfg.JobName, err)
@ -153,7 +153,7 @@ func (sp *scrapePool) reload(cfg *config.ScrapeConfig) {
sp.mtx.Lock()
defer sp.mtx.Unlock()
client, err := NewHTTPClient(cfg)
client, err := NewHTTPClient(cfg.HTTPClientConfig)
if err != nil {
// Any errors that could occur here should be caught during config validation.
log.Errorf("Error creating HTTP client for job %q: %s", cfg.JobName, err)

View file

@ -68,7 +68,7 @@ func NewTarget(labels, metaLabels model.LabelSet, params url.Values) *Target {
}
// NewHTTPClient returns a new HTTP client configured for the given scrape configuration.
func NewHTTPClient(cfg *config.ScrapeConfig) (*http.Client, error) {
func NewHTTPClient(cfg config.HTTPClientConfig) (*http.Client, error) {
tlsConfig, err := httputil.NewTLSConfig(cfg.TLSConfig)
if err != nil {
return nil, err

View file

@ -151,9 +151,8 @@ func TestNewHTTPBearerToken(t *testing.T) {
)
defer server.Close()
cfg := &config.ScrapeConfig{
ScrapeTimeout: model.Duration(1 * time.Second),
BearerToken: "1234",
cfg := config.HTTPClientConfig{
BearerToken: "1234",
}
c, err := NewHTTPClient(cfg)
if err != nil {
@ -179,8 +178,7 @@ func TestNewHTTPBearerTokenFile(t *testing.T) {
)
defer server.Close()
cfg := &config.ScrapeConfig{
ScrapeTimeout: model.Duration(1 * time.Second),
cfg := config.HTTPClientConfig{
BearerTokenFile: "testdata/bearertoken.txt",
}
c, err := NewHTTPClient(cfg)
@ -206,8 +204,7 @@ func TestNewHTTPBasicAuth(t *testing.T) {
)
defer server.Close()
cfg := &config.ScrapeConfig{
ScrapeTimeout: model.Duration(1 * time.Second),
cfg := config.HTTPClientConfig{
BasicAuth: &config.BasicAuth{
Username: "user",
Password: "password123",
@ -236,8 +233,7 @@ func TestNewHTTPCACert(t *testing.T) {
server.StartTLS()
defer server.Close()
cfg := &config.ScrapeConfig{
ScrapeTimeout: model.Duration(1 * time.Second),
cfg := config.HTTPClientConfig{
TLSConfig: config.TLSConfig{
CAFile: caCertPath,
},
@ -269,8 +265,7 @@ func TestNewHTTPClientCert(t *testing.T) {
server.StartTLS()
defer server.Close()
cfg := &config.ScrapeConfig{
ScrapeTimeout: model.Duration(1 * time.Second),
cfg := config.HTTPClientConfig{
TLSConfig: config.TLSConfig{
CAFile: caCertPath,
CertFile: "testdata/client.cer",
@ -300,8 +295,7 @@ func TestNewHTTPWithServerName(t *testing.T) {
server.StartTLS()
defer server.Close()
cfg := &config.ScrapeConfig{
ScrapeTimeout: model.Duration(1 * time.Second),
cfg := config.HTTPClientConfig{
TLSConfig: config.TLSConfig{
CAFile: caCertPath,
ServerName: "prometheus.rocks",
@ -330,8 +324,7 @@ func TestNewHTTPWithBadServerName(t *testing.T) {
server.StartTLS()
defer server.Close()
cfg := &config.ScrapeConfig{
ScrapeTimeout: model.Duration(1 * time.Second),
cfg := config.HTTPClientConfig{
TLSConfig: config.TLSConfig{
CAFile: caCertPath,
ServerName: "badname",
@ -369,8 +362,7 @@ func newTLSConfig(certName string, t *testing.T) *tls.Config {
}
func TestNewClientWithBadTLSConfig(t *testing.T) {
cfg := &config.ScrapeConfig{
ScrapeTimeout: model.Duration(1 * time.Second),
cfg := config.HTTPClientConfig{
TLSConfig: config.TLSConfig{
CAFile: "testdata/nonexistent_ca.cer",
CertFile: "testdata/nonexistent_client.cer",

View file

@ -120,7 +120,7 @@ func (tm *TargetManager) reload() {
} else {
ts.sp.reload(scfg)
}
ts.ts.UpdateProviders(discovery.ProvidersFromConfig(scfg))
ts.ts.UpdateProviders(discovery.ProvidersFromConfig(scfg.ServiceDiscoveryConfig))
}
// Remove old target sets. Waiting for scrape pools to complete pending